pax_global_header00006660000000000000000000000064125557103410014515gustar00rootroot0000000000000052 comment=a59f1b6031cbaa5ae200ab5098826b7393492082 ffc-1.6.0/000077500000000000000000000000001255571034100122575ustar00rootroot00000000000000ffc-1.6.0/.bzrignore000066400000000000000000000001301255571034100142530ustar00rootroot00000000000000syntax: glob (^|/)CVS($|/) (^|/)\.hg($|/) ./*~ ./*.pyc ./build/* ./demo/*.py ./demo/*.h ffc-1.6.0/.gitignore000066400000000000000000000007141255571034100142510ustar00rootroot00000000000000# Compiled source *.o *.Plo *.Po *.lo *.la *.a *.os *.pyc *.so *.pc *.pyd *.def *.dll *.exe *.dylib # CMake and Make files CMakeCache.txt CMakeFiles cmake_install.cmake cmake_uninstall.cmake Makefile install_manifest.txt /cmake/templates/UFCConfig.cmake /cmake/templates/UFCConfigVersion.cmake /cmake/templates/UseUFC.cmake # Temporaries *~ # OS X files .DS_Store .DS_Store? # Local build files /build # SWIG generated files /ufc/ufc.py /ufc/ufc_wrap.cpp ffc-1.6.0/AUTHORS000066400000000000000000000033561255571034100133360ustar00rootroot00000000000000Credits for FFC =============== Main authors: Anders Logg email: logg@simula.no www: http://home.simula.no/~logg/ Kristian B. Ølgaard email: k.b.oelgaard@gmail.com Marie Rognes email: meg@simula.no Main contributors: Garth N. Wells email: gnw20@cam.ac.uk www: http://www.eng.cam.ac.uk/~gnw20/ Contributors: Jan Blechta email: blechta@karlin.mff.cuni.cz Peter Brune email: brune@uchicago.edu Joachim B Haga email: jobh@broadpark.no Johan Jansson email: johanjan@math.chalmers.se www: http://www.math.chalmers.se/~johanjan/ Robert C. Kirby email: kirby@cs.uchicago.edu www: http://people.cs.uchicago.edu/~kirby/ Matthew G. Knepley email: knepley@mcs.anl.gov www: http://www-unix.mcs.anl.gov/~knepley/ Dag Lindbo email: dag@f.kth.se www: http://www.f.kth.se/~dag/ Ola Skavhaug email: skavhaug@simula.no www: http://home.simula.no/~skavhaug/ Andy R. 
Terrel email: aterrel@uchicago.edu www: http://people.cs.uchicago.edu/~aterrel/ Credits for UFC =============== UFC was merged into FFC 2014-02-18. Below is the list of credits for UFC at the time of the merge. Main authors: Martin Sandve Alnaes Anders Logg Kent-Andre Mardal Hans Petter Langtangen Main contributors: Asmund Odegard Kristian Oelgaard Johan Hake Garth N. Wells Marie E. Rognes Johannes Ring ffc-1.6.0/COPYING000066400000000000000000001045131255571034100133160ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. 
Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. 
To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. 
If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. 
For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". 
c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. 
b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. 
A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. 
But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. 
Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. 
The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. 
"Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. 
If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. 
If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. 
If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: Copyright (C) This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. 
The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see . The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . ffc-1.6.0/COPYING.LESSER000066400000000000000000000167271255571034100143230ustar00rootroot00000000000000 GNU LESSER GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. This version of the GNU Lesser General Public License incorporates the terms and conditions of version 3 of the GNU General Public License, supplemented by the additional permissions listed below. 0. Additional Definitions. As used herein, "this License" refers to version 3 of the GNU Lesser General Public License, and the "GNU GPL" refers to version 3 of the GNU General Public License. "The Library" refers to a covered work governed by this License, other than an Application or a Combined Work as defined below. An "Application" is any work that makes use of an interface provided by the Library, but which is not otherwise based on the Library. Defining a subclass of a class defined by the Library is deemed a mode of using an interface provided by the Library. A "Combined Work" is a work produced by combining or linking an Application with the Library. 
The particular version of the Library with which the Combined Work was made is also called the "Linked Version". The "Minimal Corresponding Source" for a Combined Work means the Corresponding Source for the Combined Work, excluding any source code for portions of the Combined Work that, considered in isolation, are based on the Application, and not on the Linked Version. The "Corresponding Application Code" for a Combined Work means the object code and/or source code for the Application, including any data and utility programs needed for reproducing the Combined Work from the Application, but excluding the System Libraries of the Combined Work. 1. Exception to Section 3 of the GNU GPL. You may convey a covered work under sections 3 and 4 of this License without being bound by section 3 of the GNU GPL. 2. Conveying Modified Versions. If you modify a copy of the Library, and, in your modifications, a facility refers to a function or data to be supplied by an Application that uses the facility (other than as an argument passed when the facility is invoked), then you may convey a copy of the modified version: a) under this License, provided that you make a good faith effort to ensure that, in the event an Application does not supply the function or data, the facility still operates, and performs whatever part of its purpose remains meaningful, or b) under the GNU GPL, with none of the additional permissions of this License applicable to that copy. 3. Object Code Incorporating Material from Library Header Files. The object code form of an Application may incorporate material from a header file that is part of the Library. 
You may convey such object code under terms of your choice, provided that, if the incorporated material is not limited to numerical parameters, data structure layouts and accessors, or small macros, inline functions and templates (ten or fewer lines in length), you do both of the following: a) Give prominent notice with each copy of the object code that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the object code with a copy of the GNU GPL and this license document. 4. Combined Works. You may convey a Combined Work under terms of your choice that, taken together, effectively do not restrict modification of the portions of the Library contained in the Combined Work and reverse engineering for debugging such modifications, if you also do each of the following: a) Give prominent notice with each copy of the Combined Work that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the Combined Work with a copy of the GNU GPL and this license document. c) For a Combined Work that displays copyright notices during execution, include the copyright notice for the Library among these notices, as well as a reference directing the user to the copies of the GNU GPL and this license document. d) Do one of the following: 0) Convey the Minimal Corresponding Source under the terms of this License, and the Corresponding Application Code in a form suitable for, and under terms that permit, the user to recombine or relink the Application with a modified version of the Linked Version to produce a modified Combined Work, in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source. 1) Use a suitable shared library mechanism for linking with the Library. 
A suitable mechanism is one that (a) uses at run time a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked Version. e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the GNU GPL, and only to the extent that such information is necessary to install and execute a modified version of the Combined Work produced by recombining or relinking the Application with a modified version of the Linked Version. (If you use option 4d0, the Installation Information must accompany the Minimal Corresponding Source and Corresponding Application Code. If you use option 4d1, you must provide the Installation Information in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.) 5. Combined Libraries. You may place library facilities that are a work based on the Library side by side in a single library together with other library facilities that are not Applications and are not covered by this License, and convey such a combined library under terms of your choice, if you do both of the following: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities, conveyed under the terms of this License. b) Give prominent notice with the combined library that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 6. Revised Versions of the GNU Lesser General Public License. The Free Software Foundation may publish revised and/or new versions of the GNU Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. 
If the Library as you received it specifies that a certain numbered version of the GNU Lesser General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that published version or of any later version published by the Free Software Foundation. If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation. If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library. ffc-1.6.0/ChangeLog000066400000000000000000000516361255571034100140440ustar00rootroot000000000000001.6.0 [2015-07-28] - Rename and modify a number of UFC interface functions. See docstrings in ufc.h for details. 
- Bump required SWIG version to 3.0.3 - Disable dual basis (tabulate_coordinates and evaluate_dofs) for enriched elements until correct implementation is brought up 1.5.0 [2015-01-12] - Remove FErari support - Add support for new integral type custom_integral - Support for new form compiler backend "uflacs", downloaded separately 1.4.0 [2014-06-02] - Add support for integrals that know which coefficients they use - Many bug fixes for facet integrals over manifolds - Merge UFC into FFC; ChangeLog for UFC appended below - Various updates mirroring UFL changes - Experimental: New custom integral with user defined quadrature points 1.3.0 [2014-01-07] - Fix bug with runtime check of SWIG version - Move DOLFIN wrappers here from DOLFIN - Add support for new UFL operators cell_avg and facet_avg - Add new reference data handling system, now data is kept in an external repository - Fix bugs with ignoring quadrature rule arguments - Use cpp optimization by default in jit compiler 1.2.0 [2013-03-24] - New feature: Add basic support for point integrals on vertices - New feature: Add general support for m-dimensional cells in n-dimensional space (n >= m, n, m = 1, 2, 3) 1.1.0 [2013-01-07] - Fix bug for Conditionals related to DG constant Coefficients. Bug #1082048. - Fix bug for Conditionals, precedence rules for And and Or. Bug #1075149. - Changed data structure from list to deque when pop(0) operation is needed, speeding up split_expression operation considerable - Other minor fixes 1.0.0 [2011-12-07] - Issue warning when form integration requires more than 100 points 1.0-rc1 [2011-11-28] - Fix bug with coordinates on facet integrals (intervals). Bug #888682. - Add support for FacetArea, new geometric quantity in UFL. - Fix bug in optimised quadrature code, AlgebraOperators demo. Bug #890859. - Fix bug with undeclared variables in optimised quadrature code. Bug #883202. 1.0-beta2 [2011-10-11] - Added support for bessel functions, bessel_* (I,J,K,Y), in UFL. 
- Added support for error function, erf(), new math function in UFL. - Fix dof map 'need_entities' for Real spaces - Improve performance for basis function computation 1.0-beta [2011-08-11] - Improve formatting of floats with up to one non-zero decimal place. - Fix bug involving zeros in products and sums. Bug #804160. - Fix bug for new conditions '&&', '||' and '!' in UFL. Bug #802560. - Fix bug involving VectorElement with dim=1. Bug #798578. - Fix bug with mixed element of symmetric tensor elements. Bug #745646. - Fix bug when using geometric coordinates with one quadrature point 0.9.10 [2011-05-16] - Change license from GPL v3 or later to LGPL v3 or later - Add some schemes for low-order simplices - Request quadrature schemes by polynomial degree (not longer by number of points in each direction) - Get quadrature schemes via ffc.quadrature_schemes - Improved lock handling in JIT compiler - Include common_cell in form signature - Add possibility to set swig binary and swig path 0.9.9 [2011-02-23] - Add support for generating error control forms with option -e - Updates for UFC 2.0 - Set minimal degree to 1 in automatic degree selection for expressions - Add command-line option -f no_ferari - Add support for plotting of elements - Add utility function compute_tensor_representation 0.9.4 [2010-09-01] - Added memory cache in jit(), for preprocessed forms - Added support for Conditional and added demo/Conditional.ufl. - Added support for new geometric quantity Circumradius in UFL. - Added support for new geometric quantity CellVolume in UFL. 0.9.3 [2010-07-01] - Make global_dimension for Real return an int instead of double, bug # 592088 - Add support for facet normal in 1D. 
- Expose -feliminate_zeros for quadrature optimisations to give user more control - Remove return of form in compile_form - Remove object_names argument to compile_element - Rename ElementUnion -> EnrichedElement - Add support for tan() and inverse trigonometric functions - Added support for ElementUnion (i.e. span of combinations of elements) - Added support for Bubble elements - Added support for UFL.SpatialCoordinate. 0.9.2 [2010-02-17] - Bug fix in removal of unused variables in Piola-mapped terms for tensor representation 0.9.1 [2010-02-15] - Add back support for FErari optimizations - Bug fixes in JIT compiler 0.9.0 [2010-02-02] - Updates for FIAT 0.9.0 - Updates for UFC 1.4.0 (now supporting the full interface) - Automatic selection of representation - Change quadrature_order --> quadrature_degree - Split compile() --> compile_form(), compile_element() - Major cleanup and reorganization of code (flatter directories) - Updates for changes in UFL: Argument, Coefficient, FormData 0.7.1 - Handle setting quadrature degree when it is set to None in UFL form - Added demo: HyperElasticity.ufl 0.7.0 - Move contents of TODO to: https://blueprints.launchpad.net/ffc - Support for restriction of finite elements to only consider facet dofs - Use quadrature_order from metadata when integrating terms using tensor representation - Use loop to reset the entries of the local element tensor - Added new symbolic classes for quadrature optimisation (speed up compilation) - Added demos: Biharmonic.ufl, div(grad(v)) term; ReactionDiffusion.ufl, tuple notation; MetaData.ufl, how to attach metadata to the measure; ElementRestriction.ufl, restriction of elements to facets - Tabulate the coordinates of the integration points in the tabulate_tensor() function - Change command line option '-f split_implementation' -> '-f split' - Renaming of files and restructuring of the compiler directory - Added option -q rule (--quadrature-rule rule) to specify which rule to use for integration of a 
given integral. (Can also bet set through the metadata through "quadrature_rule"). No rules have yet been implemented, so default is the FIAT rule. - Remove support for old style .form files/format 0.6.2 [2009-04-07] - Experimental support for UFL, supporting both .form and .ufl - Moved configuration and construction of python extension module to ufc_module 0.6.1 [2009-02-18] - Initial work on UFL transition - Minor bug fixes - The version of ufc and swig is included in the form signature - Better system configuration for JIT compiled forms - The JIT compiled python extension module use shared_ptr for all classes 0.6.0 [2009-01-05] - Update DOLFIN output format (-l dolfin) for DOLFIN 0.9.0 - Cross-platform fixes for test scripts - Minor bug fix for quadrature code generation (forms affected by this bug would not be able to compile - Fix bug with output of *.py. - Permit dot product bewteen rectangular matrices (Frobenius norm) 0.5.1 [2008-10-20] - New operator skew() - Allow JIT compilation of elements and dof maps - Rewrite JIT compiler to rely on Instant for caching - Display flop count for evaluating the element tensor during compilation - Add arguments language and representation to options dictionary - Fix installation on Windows - Add option -f split_implementation for separate .h and .cpp files 0.5.0 [2008-06-23] - Remove default restriction +/- for Constant - Make JIT optimization (-O0 / -O2) optional - Add in-memory cache to speed up JIT compiler for repeated assembly - Allow subdomain integrals without needing full range of integrals - Allow simple subdomain integral specification dx(0), dx(1), ds(0) etc 0.4.5 [2008-04-30] - Optimizations in generated quadrature code - Change formatting of floats from %g to %e, fixes problem with too long integers - Bug fix for order of values in interpolate_vertex_values, now according to UFC - Speed up JIT compiler - Add index ranges to form printing - Throw runtime error in functions not generated - Update DOLFIN 
format for new location of include files 0.4.4 [2008-02-18] - RT, BDM, BDFM and Nedelec now working in 2D and 3D - New element type QuadratureElement - Add support for 1D elements - Add experimental support for new Darcy-Stokes element - Use FIAT transformed spaces instead of mapping in FFC - Updates for UFC 1.1 - Implement caching of forms/modules in ~/.ffc/cache for JIT compiler - Add script ffc-clean - New operators lhs() and rhs() - Bug fixes in simplify - Bug fixes for Nedelec and BDFM - Fix bug in mult() - Fix bug with restrictions on exterior facet integrals - Fix bug in grad() for vectors - Add divergence operator for matrices 0.4.3 [2007-10-23] - Require FIAT to use UFC reference cells - Fix bug in form simplification - Rename abs --> modulus to avoid conflict with builtin abs - Fix bug in operators invert, abs, sqrt - Fix bug in integral tabulation - Add BDFM and Nedelec elements (nonworking) - Fix bug in JIT compiler 0.4.2 [2007-08-31] - Change license from GPL v2 to GPL v3 or later - Add JIT (just-in-time) compiler - Fix bug for constants on interior facets 0.4.1 [2007-06-22] - Fix bug in simplification of forms - Optimize removal of unused terms in code formattting 0.4.0 [2007-06-20] - Move to UFC interface for code generation - Major rewrite, restructure, cleanup - Add support for Brezzi-Douglas-Marini (BDM) elements - Add support for Raviart-Thomas (RT) elements - Add support for Discontinuous Galerkin (DG) methods - Operators jump() and avg() - Add quadrature compilation mode (experimental) - Simplification of forms - Operators sqrt(), abs() and inverse - Improved Python interface - Add flag -f precision=n - Generate code for basis functions and derivatives - Use Set from set module for Python2.3 compatibility 0.3.5 [2006-12-01] - Bug fixes - Move from Numeric to numpy 0.3.4 [2006-10-27] - Updates for new DOLFIN mesh library - Add support for evaluation of functionals - Add operator outer() for outer product of vector-valued functions - Enable 
optimization of linear forms (in addition to bilinear forms) - Remove DOLFIN SWIG format - Fix bug in ffc -v/--version (thanks to Ola Skavhaug) - Consolidate DOLFIN and DOLFIN SWIG formats (patch from Johan Jansson) - Fix bug in optimized compilation (-O) for some forms ("too many values to unpack") 0.3.3 [2006-09-05] - Fix bug in operator div() - Add operation count (number of multiplications) with -d0 - Add hint for printing more informative error messages (flag -d1) - Modify implementation of vertexeval() - Add support for boundary integrals (Garth N. Wells) 0.3.2 [2006-04-01] - Add support for FErari optimizations, new flag -O 0.3.1 [2006-03-28] - Remove verbose output: silence means success - Generate empty boundary integral eval() to please Intel C++ compiler - New classes TestFunction and TrialFunction 0.3.0 [2006-03-01] - Work on manual, document command-line and user-interfaces - Name change: u --> U - Add compilation of elements without form - Add generation of FiniteElementSpec in DOLFIN formats - Fix bugs in raw and XML formats - Fix bug in LaTeX format - Fix path and predefine tokens to enable import in .form file - Report number of entries in reference tensor during compilation 0.2.5 [2005-12-28] - Add demo Stabilization.form - Further speedup computation of reference tensor (use ufunc Numeric.add) 0.2.4 [2005-12-05] - Report time taken to compute reference tensor - Restructure computation of reference tensor to use less memory. As a side effect, the speed has also been improved. 
- Update for DOLFIN name change node --> vertex - Update finite element interface for DOLFIN - Check for FIAT bug in discontinuous vector Lagrange elements - Fix signatures for vector-valued elements 0.2.3 [2005-11-28] - New fast Numeric/BLAS based algorithm for computing reference tensor - Bug fix: reassign indices for complete subexpressions - Bug fix: operator Function * Integral - Check tensor notation for completeness - Bug fix: mixed elements with more than two function spaces - Don't declare unused coefficients (or gcc will complain) 0.2.2 [2005-11-14] - Add command-line argument -v / --version - Add new operator mean() for projection onto piecewise constants - Add support for projections - Bug fix for higher order mixed elements: declaration of edge/face_ordering - Generate code for sub elements of mixed elements - Add new test form: TensorWeighteLaplacian - Add new test form: EnergyNorm - Fix bugs in mult() and vec() (skavhaug) - Reset correct entries of G for interior in BLAS mode - Only assign to entries of G that meet nonzero entries of A in BLAS mode 0.2.1 [2005-10-11] - Only generate declarations that are needed according to format - Check for missing options and add missing default options - Simplify usage of FFC as Python module: from ffc import * - Fix bug in division with constants - Generate output for BLAS (with option -f blas) - Add new XML output format - Remove command-line option --license (collect in compiler options -f) - Modify demo Mass.form to use 3:rd order Lagrange on tets - Fix bug in dofmap() for equal order mixed elements - Add compiler option -d debuglevel - Fix Python Numeric bug: vdot --> dot 0.2.0 [2005-09-23] - Generate function vertexeval() for evaluation at vertices - Add support for arbitrary mixed elements - Add man page - Work on manual, chapters on form language, quickstart and installation - Handle exceptions gracefully in command-line interface - Use new template fenicsmanual.cls for manual - Add new operators grad, 
div, rot (curl), D, rank, trace, dot, cross - Factorize common reference tensors from terms with equal signatures - Collect small building blocks for form algebra in common module tokens.py 0.1.9 [2005-07-05] - Complete support for general order Lagrange elements on triangles and tetrahedra - Compute reordering of dofs on tets correctly - Update manual with ordering of dofs - Break compilation into two phases: build() and write() - Add new output format ASE (Matt Knepley) - Improve python interface to FFC - Remove excessive logging at compilation - Fix bug in raw output format 0.1.8 [2005-05-17] - Access data through map in DOLFIN format - Experimental support for computation of coordinate maps - Add first draft of manual - Experimental support for computation of dof maps - Allow specification of the number of components for vector Lagrange - Count the number of zeros dropped - Fix bug in handling command-line arguments - Use module sets instead of built-in set (fix for Python 2.3) - Handle constant indices correctly (bug reported by Garth N. 
Wells) 0.1.7 [2005-05-02] - Write version number to output - Add command-line option for choosing license - Display usage if no input is given - Bug fix for finding correct prefix of file name - Automatically choose name of output file (if not supplied) - Use FIAT tabulation mode for vector-valued elements (speedup a factor 5) - Use FIAT tabulation mode for scalar elements (speedup a factor 1000) - Fig bug in demo elasticity.form (change order of u and v) - Make references to constants const in DOLFIN format - Don't generate code for unused entries of geometry tensor - Update formats to write numeric constants with full precision 0.1.6 [2005-03-17] - Add support for mixing multiple different finite elements - Add support for division with constants - Fix index bug (reverse order of multi-indices) 0.1.5 [2005-03-14] - Automatically choose the correct quadrature rule for precomputation - Add test program for verification of FIAT quadrature rules - Fix bug for derivative of sum - Improve common interface for debugging: add indentation - Add support for constants - Fix bug for sums of more than one term (make copies of references in lists) - Add '_' in naming of geometry tensor (needed for large dimensions) - Add example elasticity.form - Cleanup build_indices() 0.1.4-1 [2005-02-07] - Fix version number and remove build directory from tarball 0.1.4 [2005-02-04] - Fix bug for systems, seems to work now - Add common interface for debugging - Modify DOLFIN output to initialize functions - Create unique numbers for each function - Use namespaces for DOLFIN output instead of class names - Temporary implementation of dof mapping for vector-valued elements - Make DOLFIN output format put entries into PETSc block - Change name of coefficient data: c%d[%d] -> c[%d][%d] - Change ordering of basis functions (one component at a time) - Add example poissonsystem.form - Modifications for new version of FIAT (FIAT-L) FIAT version 0.1 a factor 5 slower (no memoization) FIAT version 
0.1.1 a little faster, only a factor 2 slower - Add setup.py script 0.1.3 [2004-12-06] - Fix bug in DOLFIN format (missing value when zero) - Add output of reference tensor to LaTeX format - Make raw output format print data with full precision - Add component diagram - Change order of declaration of basis functions - Add new output format raw 0.1.2 [2004-11-17] - Add command-line interface ffc - Add support for functions (coefficients) - Add support for constants - Allow multiple forms (left- and right-hand side) in same file - Add test examples: poisson.form, mass.form, navierstokes.form - Wrap FIAT to create vector-valued finite element spaces - Check ranks of operands - Clean up algebra, add base class Element - Add some documentation (class diagram) - Add support for LaTeX output 0.1.1-1 [2004-11-10] - Add missing file declaration.py 0.1.1 [2004-11-10] - Make output variable names configurable - Clean up DOLFIN code generation - Post-process form to create reference, geometry, and element tensors - Experimental support for general tensor-valued elements - Clean up and improve index reassignment - Use string formatting for generation of output - Change index ordering to access row-wise 0.1.0 [2004-10-22] - First iteration of the FEniCS Form Compiler - Change boost::shared_ptr --> std::shared_ptr ChangeLog for UFC ================= UFC was merged into FFC 2014-02-18. Below is the ChangeLog for UFC at the time of the merge. From this point onward, UFC version numbering restarts at the same version number as FFC and the rest of FEniCS. 
2.3.0 [2014-01-07] - Use std::vector > for topology data - Remove vertex coordinates from ufc::cell - Improve detection of compatible Python libraries - Add current swigversion to the JIT compiled extension module - Remove dofmap::max_local_dimension() - Remove cell argument from dofmap::local_dimension() 2.2.0 [2013-03-24] - Add new class ufc::point_integral - Use CMake to configure JIT compilation of forms - Generate UseUFC.cmake during configuration - Remove init_mesh(), init_cell(), init_mesh_finalize() - Remove ufc::mesh and add a vector of num_mesh_entities to global_dimension() and tabulate_dofs(). 2.1.0 [2013-01-07] - Fix bug introduced by SWIG 2.0.5, which treated uint as Python long - Add optimization SWIG flags, fixing bug lp:987657 2.0.5 [2011-12-07] - Improve configuration of libboost-math 2.0.4 [2011-11-28] - Add boost_math_tr1 to library flags when JIT compiling an extension module 2.0.3 [2011-10-26] - CMake config improvements 2.0.2 [2011-08-11] - Some tweaks of installation 2.0.1 [2011-05-16] - Make SWIG version >= 2.0 a requirement - Add possibility to set swig binary and swig path - Add missing const for map_{from,to}_reference_cell 2.0.0 [2011-02-23] - Add quadrature version of tabulate_tensor - Add finite_element::map_{from,to}_reference_cell - Add finite_element::{topological,geometric}_dimension - Add dofmap::topological_dimension - Rename num_foo_integrals --> num_foo_domains - Rename dof_map --> dofmap - Add finite_element::create - Add dofmap::create 1.4.2 [2010-09-01] - Move to CMake build system 1.4.1 [2010-07-01] - Make functions introduced in UFC 1.1 mandatory (now pure virtual) - Update templates to allow constructor arguments in form classes 1.4.0 [2010-02-01] - Changed behavior of create_foo_integral (returning 0 when integral is 0) - Bug fixes in installation 1.2.0 [2009-09-23] - Add new function ufc::dof_map::max_local_dimension() - Change ufc::dof_map::local_dimension() to ufc::dof_map::local_dimension(const ufc::cell c) 1.1.2 
[2009-04-07] - Added configuration and building of python extension module to ufc_utils.build_ufc_module 1.1.1 [2009-02-20] - The extension module is now not built, if the conditions for shared_ptr are not met - Added SCons build system - The swig generated extension module will be compiled with shared_ptr support if boost is found on system and swig is of version 1.3.35 or higher - The swig generated extension module is named ufc.py and expose all ufc base classes to python - Added a swig generated extention module to ufc. UFC now depends on swig - Changed name of the python utility module from "ufc" to "ufc_utils" 1.1.0 [2008-02-18] - Add new function ufc::finite_element::evaluate_dofs - Add new function ufc::finite_element::evaluate_basis_all - Add new function ufc::finite_element::evaluate_basis_derivatives_all - Add new function ufc::dof_map::geometric_dimension - Add new function ufc::dof_map::num_entity_dofs - Add new function ufc::dof_map::tabulate_entity_dofs 1.0.0 [2007-06-17] - Release of UFC 1.0 ffc-1.6.0/INSTALL000066400000000000000000000012561255571034100133140ustar00rootroot00000000000000To install FFC, type sudo python setup.py install This will install FFC in the default Python path of your system, something like /usr/lib/python2.6/site-packages/. To specify C++ compiler and/or compiler flags used for compiling UFC and JITing, set environment variables CXX, CXXFLAGS respectively before invoking setup.py. The installation script requires the Python module distutils, which for Debian users is available with the python-dev package. Other dependencies are listed in the file README. For detailed installation instructions, see the FFC user manual which is available on http://fenicsproject.org/ and also in the subdirectory doc/manual/ of this source tree. 
ffc-1.6.0/README.rst000066400000000000000000000041771255571034100137570ustar00rootroot00000000000000----------------------------- FFC: The FEniCS Form Compiler ----------------------------- FFC is a compiler for finite element variational forms. From a high-level description of the form, it generates efficient low-level C++ code that can be used to assemble the corresponding discrete operator (tensor). In particular, a bilinear form may be assembled into a matrix and a linear form may be assembled into a vector. FFC may be used either from the command line (by invoking the ``ffc`` command) or as a Python module (``import ffc``). FFC is part of the FEniCS project (http://www.fenicsproject.org) and functions as a just-in-time (JIT) compiler for DOLFIN. For further introduction to FFC, open the FFC user manual available in the subdirectory ``doc/manual/`` of this source tree, or try out the demos available in the subdirectory ``src/demo/`` of this source tree. License ------- This program is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this program. If not, see . Dependencies ------------ #. Python, version 2.7 or later #. The latest version of FIAT, Instant and UFL You need to have FIAT, Instant and UFL installed. They are available from the web page: https://bitbucket.org/fenics-project/. #. The Python NumPy module #. The Python Six module #. 
SWIG, version 2.0.0 or later for Python 2 and version 3.0.3 or later for Python 3 Notes ----- From February 2014, the code generation interface UFC is distributed as part of FFC, and the UFC repository has been merged into the FFC repository. From this point onwards, UFC version numbers are reset to the same version numbers as for FFC. ffc-1.6.0/bench/000077500000000000000000000000001255571034100133365ustar00rootroot00000000000000ffc-1.6.0/bench/HyperElasticity.ufl000066400000000000000000000040231255571034100171670ustar00rootroot00000000000000# Copyright (C) 2009 Harish Narayanan # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2009-09-29 # Last changed: 2011-07-01 # # The bilinear form a(u, v) and linear form L(v) for # a hyperelastic model. (Copied from dolfin/demo/pde/hyperelasticity/cpp) # # Compile this form with FFC: ffc HyperElasticity.ufl. 
# Coefficient spaces element = VectorElement("Lagrange", tetrahedron, 1) # Coefficients v = TestFunction(element) # Test function du = TrialFunction(element) # Incremental displacement u = Coefficient(element) # Displacement from previous iteration B = Coefficient(element) # Body force per unit mass T = Coefficient(element) # Traction force on the boundary # Kinematics d = len(u) I = Identity(d) # Identity tensor F = I + grad(u) # Deformation gradient C = F.T*F # Right Cauchy-Green tensor E = (C - I)/2 # Euler-Lagrange strain tensor E = variable(E) # Material constants mu = Constant(tetrahedron) # Lame's constants lmbda = Constant(tetrahedron) # Strain energy function (material model) psi = lmbda/2*(tr(E)**2) + mu*tr(E*E) S = diff(psi, E) # Second Piola-Kirchhoff stress tensor P = F*S # First Piola-Kirchoff stress tensor # The variational problem corresponding to hyperelasticity L = inner(P, grad(v))*dx - inner(B, v)*dx - inner(T, v)*ds a = derivative(L, u, du) ffc-1.6.0/bench/MassH1_2D_1.ufl000066400000000000000000000014471255571034100157150ustar00rootroot00000000000000# Copyright (C) 2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
element = FiniteElement("Lagrange", triangle, 1) v = TestFunction(element) u = TrialFunction(element) a = v*u*dx ffc-1.6.0/bench/MassH1_2D_2.ufl000066400000000000000000000014471255571034100157160ustar00rootroot00000000000000# Copyright (C) 2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . element = FiniteElement("Lagrange", triangle, 2) v = TestFunction(element) u = TrialFunction(element) a = v*u*dx ffc-1.6.0/bench/MassH1_2D_3.ufl000066400000000000000000000014471255571034100157170ustar00rootroot00000000000000# Copyright (C) 2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
element = FiniteElement("Lagrange", triangle, 3) v = TestFunction(element) u = TrialFunction(element) a = v*u*dx ffc-1.6.0/bench/MassH1_2D_4.ufl000066400000000000000000000014471255571034100157200ustar00rootroot00000000000000# Copyright (C) 2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . element = FiniteElement("Lagrange", triangle, 4) v = TestFunction(element) u = TrialFunction(element) a = v*u*dx ffc-1.6.0/bench/MassH1_2D_5.ufl000066400000000000000000000014471255571034100157210ustar00rootroot00000000000000# Copyright (C) 2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
element = FiniteElement("Lagrange", triangle, 5) v = TestFunction(element) u = TrialFunction(element) a = v*u*dx ffc-1.6.0/bench/MassHcurl_2D_1.ufl000066400000000000000000000014621255571034100165170ustar00rootroot00000000000000# Copyright (C) 2004-2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . element = FiniteElement("N1curl", triangle, 1) v = TestFunction(element) u = TrialFunction(element) a = inner(v, u)*dx ffc-1.6.0/bench/MassHcurl_2D_2.ufl000066400000000000000000000014621255571034100165200ustar00rootroot00000000000000# Copyright (C) 2004-2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
element = FiniteElement("N1curl", triangle, 2) v = TestFunction(element) u = TrialFunction(element) a = inner(v, u)*dx ffc-1.6.0/bench/MassHcurl_2D_3.ufl000066400000000000000000000014621255571034100165210ustar00rootroot00000000000000# Copyright (C) 2004-2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . element = FiniteElement("N1curl", triangle, 3) v = TestFunction(element) u = TrialFunction(element) a = inner(v, u)*dx ffc-1.6.0/bench/MassHcurl_2D_4.ufl000066400000000000000000000014621255571034100165220ustar00rootroot00000000000000# Copyright (C) 2004-2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
element = FiniteElement("N1curl", triangle, 4) v = TestFunction(element) u = TrialFunction(element) a = inner(v, u)*dx ffc-1.6.0/bench/MassHcurl_2D_5.ufl000066400000000000000000000014621255571034100165230ustar00rootroot00000000000000# Copyright (C) 2004-2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . element = FiniteElement("N1curl", triangle, 5) v = TestFunction(element) u = TrialFunction(element) a = inner(v, u)*dx ffc-1.6.0/bench/MassHdiv_2D_1.ufl000066400000000000000000000014521255571034100163330ustar00rootroot00000000000000# Copyright (C) 2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
element = FiniteElement("BDM", triangle, 1) v = TestFunction(element) u = TrialFunction(element) a = inner(v, u)*dx ffc-1.6.0/bench/MassHdiv_2D_2.ufl000066400000000000000000000014521255571034100163340ustar00rootroot00000000000000# Copyright (C) 2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . element = FiniteElement("BDM", triangle, 2) v = TestFunction(element) u = TrialFunction(element) a = inner(v, u)*dx ffc-1.6.0/bench/MassHdiv_2D_3.ufl000066400000000000000000000014521255571034100163350ustar00rootroot00000000000000# Copyright (C) 2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
element = FiniteElement("BDM", triangle, 3) v = TestFunction(element) u = TrialFunction(element) a = inner(v, u)*dx ffc-1.6.0/bench/MassHdiv_2D_4.ufl000066400000000000000000000014521255571034100163360ustar00rootroot00000000000000# Copyright (C) 2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . element = FiniteElement("BDM", triangle, 4) v = TestFunction(element) u = TrialFunction(element) a = inner(v, u)*dx ffc-1.6.0/bench/MassHdiv_2D_5.ufl000066400000000000000000000014521255571034100163370ustar00rootroot00000000000000# Copyright (C) 2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
element = FiniteElement("BDM", triangle, 5) v = TestFunction(element) u = TrialFunction(element) a = inner(v, u)*dx ffc-1.6.0/bench/NavierStokes_2D_1.ufl000066400000000000000000000016321255571034100172320ustar00rootroot00000000000000# Copyright (C) 2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . scalar = FiniteElement("Lagrange", triangle, 1) vector = VectorElement("Lagrange", triangle, 1) v = TestFunction(vector) u = TrialFunction(vector) w = Coefficient(vector) rho = Coefficient(scalar) a = rho*inner(v, grad(w)*u)*dx ffc-1.6.0/bench/NavierStokes_2D_2.ufl000066400000000000000000000016321255571034100172330ustar00rootroot00000000000000# Copyright (C) 2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
scalar = FiniteElement("Lagrange", triangle, 2) vector = VectorElement("Lagrange", triangle, 2) v = TestFunction(vector) u = TrialFunction(vector) w = Coefficient(vector) rho = Coefficient(scalar) a = rho*inner(v, grad(w)*u)*dx ffc-1.6.0/bench/NavierStokes_2D_3.ufl000066400000000000000000000016321255571034100172340ustar00rootroot00000000000000# Copyright (C) 2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . scalar = FiniteElement("Lagrange", triangle, 3) vector = VectorElement("Lagrange", triangle, 3) v = TestFunction(vector) u = TrialFunction(vector) w = Coefficient(vector) rho = Coefficient(scalar) a = rho*inner(v, grad(w)*u)*dx ffc-1.6.0/bench/Poisson_2D_1.ufl000066400000000000000000000015001255571034100162410ustar00rootroot00000000000000# Copyright (C) 2004-2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. 
If not, see . element = FiniteElement("Lagrange", triangle, 1) v = TestFunction(element) u = TrialFunction(element) a = inner(grad(v), grad(u))*dx ffc-1.6.0/bench/Poisson_2D_2.ufl000066400000000000000000000015001255571034100162420ustar00rootroot00000000000000# Copyright (C) 2004-2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . element = FiniteElement("Lagrange", triangle, 2) v = TestFunction(element) u = TrialFunction(element) a = inner(grad(v), grad(u))*dx ffc-1.6.0/bench/Poisson_2D_3.ufl000066400000000000000000000015001255571034100162430ustar00rootroot00000000000000# Copyright (C) 2004-2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
element = FiniteElement("Lagrange", triangle, 3) v = TestFunction(element) u = TrialFunction(element) a = inner(grad(v), grad(u))*dx ffc-1.6.0/bench/Poisson_2D_4.ufl000066400000000000000000000015001255571034100162440ustar00rootroot00000000000000# Copyright (C) 2004-2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . element = FiniteElement("Lagrange", triangle, 4) v = TestFunction(element) u = TrialFunction(element) a = inner(grad(v), grad(u))*dx ffc-1.6.0/bench/Poisson_2D_5.ufl000066400000000000000000000015001255571034100162450ustar00rootroot00000000000000# Copyright (C) 2004-2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
element = FiniteElement("Lagrange", triangle, 5) v = TestFunction(element) u = TrialFunction(element) a = inner(grad(v), grad(u))*dx ffc-1.6.0/bench/WeightedPoisson_2D_1.ufl000066400000000000000000000015261255571034100177320ustar00rootroot00000000000000# Copyright (C) 2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . element = FiniteElement("Lagrange", triangle, 1) v = TestFunction(element) u = TrialFunction(element) c = Coefficient(element) a = c*inner(grad(v), grad(u))*dx ffc-1.6.0/bench/WeightedPoisson_2D_2.ufl000066400000000000000000000015261255571034100177330ustar00rootroot00000000000000# Copyright (C) 2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
element = FiniteElement("Lagrange", triangle, 2) v = TestFunction(element) u = TrialFunction(element) c = Coefficient(element) a = c*inner(grad(v), grad(u))*dx ffc-1.6.0/bench/WeightedPoisson_2D_3.ufl000066400000000000000000000015261255571034100177340ustar00rootroot00000000000000# Copyright (C) 2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . element = FiniteElement("Lagrange", triangle, 3) v = TestFunction(element) u = TrialFunction(element) c = Coefficient(element) a = c*inner(grad(v), grad(u))*dx ffc-1.6.0/bench/WeightedPoisson_2D_4.ufl000066400000000000000000000015261255571034100177350ustar00rootroot00000000000000# Copyright (C) 2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
element = FiniteElement("Lagrange", triangle, 4) v = TestFunction(element) u = TrialFunction(element) c = Coefficient(element) a = c*inner(grad(v), grad(u))*dx ffc-1.6.0/bench/WeightedPoisson_2D_5.ufl000066400000000000000000000015261255571034100177360ustar00rootroot00000000000000# Copyright (C) 2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . element = FiniteElement("Lagrange", triangle, 5) v = TestFunction(element) u = TrialFunction(element) c = Coefficient(element) a = c*inner(grad(v), grad(u))*dx ffc-1.6.0/bench/bench.py000066400000000000000000000037321255571034100147740ustar00rootroot00000000000000"""This script runs a benchmark study on the form files found in the current directory. It relies on the regression test script for timings.""" # Copyright (C) 2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. 
If not, see . # # First added: 2010-05-11 # Last changed: 2010-05-11 import os, glob from utils import print_table # Test options test_options = ["-r tensor", "-r tensor -O", "-r quadrature", "-r quadrature -O"] # Get list of test cases test_cases = sorted([f.split(".")[0] for f in glob.glob("*.ufl")]) # Open logfile logfile = open("bench.log", "w") # Iterate over options os.chdir("../test/regression") table = {} for (j, test_option) in enumerate(test_options): # Run benchmark print "\nUsing options %s\n" % test_option os.system("python test.py --bench %s" % test_option) # Collect results for (i, test_case) in enumerate(test_cases): output = open("output/%s.out" % test_case).read() lines = [line for line in output.split("\n") if "bench" in line] if not len(lines) == 1: raise RuntimeError("Unable to extract benchmark data for test case %s" % test_case) timing = float(lines[0].split(":")[-1]) table[(i, j)] = (test_case, test_option, timing) logfile.write("%s, %s, %g\n" % (test_case, test_option, timing)) # Close logfile logfile.close() # Print results print_table(table, "FFC bench") ffc-1.6.0/bench/plot.py000066400000000000000000000047331255571034100146750ustar00rootroot00000000000000"This script plots the results found in bench.log." # Copyright (C) 2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
# # First added: 2010-05-13 # Last changed: 2010-05-13 from pylab import * # Read logfile results = {} try: output = open("bench.log").read() except: output = open("results/bench.log").read() for line in output.split("\n"): if not "," in line: continue test_case, test_option, timing = [w.strip() for w in line.split(",")] try: form, degree = test_case.split("_") except: form, dim, degree = test_case.split("_") form = form + "_" + dim if not form in results: results[form] = {} if not test_option in results[form]: results[form][test_option] = ([], []) results[form][test_option][0].append(int(degree)) results[form][test_option][1].append(float(timing)) # Plot results forms = sorted([form for form in results]) test_options = ["-r quadrature", "-r quadrature -O", "-r tensor", "-r tensor -O"] bullets = ["x-", "o-", "*-", "s-"] for (i, form) in enumerate(forms): figure(i) # Plot timings subplot(121) for (j, test_option) in enumerate(test_options): q, t = results[form][test_option] semilogy(q, t, bullets[j]) hold(True) a = list(axis()); a[-1] = 50.0*a[-1]; axis(a); legend(test_options, loc="upper left") grid(True) xlabel('degree') ylabel(form) title('CPU time') # Plot speedups subplot(122) q0, t0 = results[form]["-r quadrature"] for (j, test_option) in enumerate(test_options): q, t = results[form][test_option] t = [t0[k] / t[k] for k in range(len(t))] semilogy(q, t, bullets[j]) hold(True) a = list(axis()); a[-1] = 50.0*a[-1]; axis(a); legend(test_options, loc="upper left") grid(True) xlabel('degree') title("Speedup vs '-r quadrature'") show() ffc-1.6.0/bench/results/000077500000000000000000000000001255571034100150375ustar00rootroot00000000000000ffc-1.6.0/bench/results/bench.log000066400000000000000000000105341255571034100166240ustar00rootroot00000000000000MassH1_1, -r tensor, 4.55379e-08 MassH1_2, -r tensor, 5.8651e-08 MassH1_3, -r tensor, 1.62125e-07 MassH1_4, -r tensor, 3.8147e-07 MassH1_5, -r tensor, 5.6076e-07 MassHcurl_1, -r tensor, 9.01222e-08 MassHcurl_2, -r tensor, 
2.46048e-07 MassHcurl_3, -r tensor, 7.93457e-07 MassHcurl_4, -r tensor, 5.58472e-06 MassHcurl_5, -r tensor, 1.17188e-05 MassHdiv_1, -r tensor, 1.65939e-07 MassHdiv_2, -r tensor, 5.26428e-07 MassHdiv_3, -r tensor, 3.75366e-06 MassHdiv_4, -r tensor, 8.48389e-06 MassHdiv_5, -r tensor, 1.61133e-05 NavierStokes_1, -r tensor, 5.37872e-07 NavierStokes_2, -r tensor, 2.02637e-05 NavierStokes_3, -r tensor, 0.000178711 Poisson_1, -r tensor, 8.2016e-08 Poisson_2, -r tensor, 1.19209e-07 Poisson_3, -r tensor, 2.82288e-07 Poisson_4, -r tensor, 6.48499e-07 Poisson_5, -r tensor, 3.72314e-06 WeightedPoisson_1, -r tensor, 1.23978e-07 WeightedPoisson_2, -r tensor, 5.45502e-07 WeightedPoisson_3, -r tensor, 7.50732e-06 WeightedPoisson_4, -r tensor, 2.80762e-05 WeightedPoisson_5, -r tensor, 8.05664e-05 MassH1_1, -r tensor -O, 3.71933e-08 MassH1_2, -r tensor -O, 5.57899e-08 MassH1_3, -r tensor -O, 1.24931e-07 MassH1_4, -r tensor -O, 2.82288e-07 MassH1_5, -r tensor -O, 5.34058e-07 MassHcurl_1, -r tensor -O, 8.91685e-08 MassHcurl_2, -r tensor -O, 2.46048e-07 MassHcurl_3, -r tensor -O, 7.47681e-07 MassHcurl_4, -r tensor -O, 5.00488e-06 MassHcurl_5, -r tensor -O, 1.01929e-05 MassHdiv_1, -r tensor -O, 1.57356e-07 MassHdiv_2, -r tensor -O, 4.50134e-07 MassHdiv_3, -r tensor -O, 1.31226e-06 MassHdiv_4, -r tensor -O, 7.20215e-06 MassHdiv_5, -r tensor -O, 1.36719e-05 NavierStokes_1, -r tensor -O, 3.43323e-07 NavierStokes_2, -r tensor -O, 1.16577e-05 NavierStokes_3, -r tensor -O, 8.93555e-05 Poisson_1, -r tensor -O, 8.4877e-08 Poisson_2, -r tensor -O, 1.13487e-07 Poisson_3, -r tensor -O, 2.32697e-07 Poisson_4, -r tensor -O, 4.80652e-07 Poisson_5, -r tensor -O, 9.38416e-07 WeightedPoisson_1, -r tensor -O, 1.13487e-07 WeightedPoisson_2, -r tensor -O, 4.27246e-07 WeightedPoisson_3, -r tensor -O, 5.31006e-06 WeightedPoisson_4, -r tensor -O, 1.96533e-05 WeightedPoisson_5, -r tensor -O, 5.51758e-05 MassH1_1, -r quadrature, 3.7384e-07 MassH1_2, -r quadrature, 2.94495e-06 MassH1_3, -r quadrature, 
1.42822e-05 MassH1_4, -r quadrature, 5.07812e-05 MassH1_5, -r quadrature, 0.000148437 MassHcurl_1, -r quadrature, 8.39233e-07 MassHcurl_2, -r quadrature, 1.14136e-05 MassHcurl_3, -r quadrature, 7.8125e-05 MassHcurl_4, -r quadrature, 0.000316406 MassHcurl_5, -r quadrature, 0.000992188 MassHdiv_1, -r quadrature, 8.54492e-06 MassHdiv_2, -r quadrature, 7.51953e-05 MassHdiv_3, -r quadrature, 0.000367188 MassHdiv_4, -r quadrature, 0.00128125 MassHdiv_5, -r quadrature, 0.00378125 NavierStokes_1, -r quadrature, 4.02832e-06 NavierStokes_2, -r quadrature, 5.9082e-05 NavierStokes_3, -r quadrature, 0.000355469 Poisson_1, -r quadrature, 2.07901e-07 Poisson_2, -r quadrature, 3.38745e-06 Poisson_3, -r quadrature, 2.03857e-05 Poisson_4, -r quadrature, 7.91016e-05 Poisson_5, -r quadrature, 0.000255859 WeightedPoisson_1, -r quadrature, 2.57492e-07 WeightedPoisson_2, -r quadrature, 8.11768e-06 WeightedPoisson_3, -r quadrature, 4.05273e-05 WeightedPoisson_4, -r quadrature, 0.000183594 WeightedPoisson_5, -r quadrature, 0.000535156 MassH1_1, -r quadrature -O, 3.64304e-07 MassH1_2, -r quadrature -O, 3.17383e-06 MassH1_3, -r quadrature -O, 1.3916e-05 MassH1_4, -r quadrature -O, 4.8584e-05 MassH1_5, -r quadrature -O, 0.000136719 MassHcurl_1, -r quadrature -O, 6.79016e-07 MassHcurl_2, -r quadrature -O, 8.42285e-06 MassHcurl_3, -r quadrature -O, 6.00586e-05 MassHcurl_4, -r quadrature -O, 0.000248047 MassHcurl_5, -r quadrature -O, 0.000777344 MassHdiv_1, -r quadrature -O, 2.62451e-06 MassHdiv_2, -r quadrature -O, 2.28271e-05 MassHdiv_3, -r quadrature -O, 0.000111328 MassHdiv_4, -r quadrature -O, 0.00040625 MassHdiv_5, -r quadrature -O, 0.00122656 NavierStokes_1, -r quadrature -O, 1.60217e-06 NavierStokes_2, -r quadrature -O, 2.19727e-05 NavierStokes_3, -r quadrature -O, 0.000132813 Poisson_1, -r quadrature -O, 2.02179e-07 Poisson_2, -r quadrature -O, 3.479e-06 Poisson_3, -r quadrature -O, 2.49023e-05 Poisson_4, -r quadrature -O, 0.000107422 Poisson_5, -r quadrature -O, 0.000349609 
WeightedPoisson_1, -r quadrature -O, 2.26974e-07 WeightedPoisson_2, -r quadrature -O, 7.93457e-06 WeightedPoisson_3, -r quadrature -O, 4.41895e-05 WeightedPoisson_4, -r quadrature -O, 0.000224609 WeightedPoisson_5, -r quadrature -O, 0.000703125 ffc-1.6.0/bench/results/results.log000066400000000000000000000115111255571034100172420ustar00rootroot00000000000000Linux aule 2.6.32-21-generic #32-Ubuntu SMP Fri Apr 16 08:09:38 UTC 2010 x86_64 GNU/Linux Thu May 13 21:39:15 CEST 2010 ---------------------------------------------------------------------------------- | FFC bench | -r tensor | -r tensor -O | -r quadrature | -r quadrature -O | ---------------------------------------------------------------------------------- | MassH1_1 | 4.5538e-08 | 3.7193e-08 | 3.7384e-07 | 3.643e-07 | ---------------------------------------------------------------------------------- | MassH1_2 | 5.8651e-08 | 5.579e-08 | 2.9449e-06 | 3.1738e-06 | ---------------------------------------------------------------------------------- | MassH1_3 | 1.6212e-07 | 1.2493e-07 | 1.4282e-05 | 1.3916e-05 | ---------------------------------------------------------------------------------- | MassH1_4 | 3.8147e-07 | 2.8229e-07 | 5.0781e-05 | 4.8584e-05 | ---------------------------------------------------------------------------------- | MassH1_5 | 5.6076e-07 | 5.3406e-07 | 0.00014844 | 0.00013672 | ---------------------------------------------------------------------------------- | MassHcurl_1 | 9.0122e-08 | 8.9169e-08 | 8.3923e-07 | 6.7902e-07 | ---------------------------------------------------------------------------------- | MassHcurl_2 | 2.4605e-07 | 2.4605e-07 | 1.1414e-05 | 8.4229e-06 | ---------------------------------------------------------------------------------- | MassHcurl_3 | 7.9346e-07 | 7.4768e-07 | 7.8125e-05 | 6.0059e-05 | ---------------------------------------------------------------------------------- | MassHcurl_4 | 5.5847e-06 | 5.0049e-06 | 0.00031641 | 0.00024805 | 
---------------------------------------------------------------------------------- | MassHcurl_5 | 1.1719e-05 | 1.0193e-05 | 0.00099219 | 0.00077734 | ---------------------------------------------------------------------------------- | MassHdiv_1 | 1.6594e-07 | 1.5736e-07 | 8.5449e-06 | 2.6245e-06 | ---------------------------------------------------------------------------------- | MassHdiv_2 | 5.2643e-07 | 4.5013e-07 | 7.5195e-05 | 2.2827e-05 | ---------------------------------------------------------------------------------- | MassHdiv_3 | 3.7537e-06 | 1.3123e-06 | 0.00036719 | 0.00011133 | ---------------------------------------------------------------------------------- | MassHdiv_4 | 8.4839e-06 | 7.2021e-06 | 0.0012813 | 0.00040625 | ---------------------------------------------------------------------------------- | MassHdiv_5 | 1.6113e-05 | 1.3672e-05 | 0.0037812 | 0.0012266 | ---------------------------------------------------------------------------------- | NavierStokes_1 | 5.3787e-07 | 3.4332e-07 | 4.0283e-06 | 1.6022e-06 | ---------------------------------------------------------------------------------- | NavierStokes_2 | 2.0264e-05 | 1.1658e-05 | 5.9082e-05 | 2.1973e-05 | ---------------------------------------------------------------------------------- | NavierStokes_3 | 0.00017871 | 8.9355e-05 | 0.00035547 | 0.00013281 | ---------------------------------------------------------------------------------- | Poisson_1 | 8.2016e-08 | 8.4877e-08 | 2.079e-07 | 2.0218e-07 | ---------------------------------------------------------------------------------- | Poisson_2 | 1.1921e-07 | 1.1349e-07 | 3.3875e-06 | 3.479e-06 | ---------------------------------------------------------------------------------- | Poisson_3 | 2.8229e-07 | 2.327e-07 | 2.0386e-05 | 2.4902e-05 | ---------------------------------------------------------------------------------- | Poisson_4 | 6.485e-07 | 4.8065e-07 | 7.9102e-05 | 0.00010742 | 
---------------------------------------------------------------------------------- | Poisson_5 | 3.7231e-06 | 9.3842e-07 | 0.00025586 | 0.00034961 | ---------------------------------------------------------------------------------- | WeightedPoisson_1 | 1.2398e-07 | 1.1349e-07 | 2.5749e-07 | 2.2697e-07 | ---------------------------------------------------------------------------------- | WeightedPoisson_2 | 5.455e-07 | 4.2725e-07 | 8.1177e-06 | 7.9346e-06 | ---------------------------------------------------------------------------------- | WeightedPoisson_3 | 7.5073e-06 | 5.3101e-06 | 4.0527e-05 | 4.4189e-05 | ---------------------------------------------------------------------------------- | WeightedPoisson_4 | 2.8076e-05 | 1.9653e-05 | 0.00018359 | 0.00022461 | ---------------------------------------------------------------------------------- ffc-1.6.0/bench/utils.py000066400000000000000000000034141255571034100150520ustar00rootroot00000000000000# Copyright (C) 2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2010-05-11 # Last changed: 2010-05-11 def print_table(values, title): "Print nicely formatted table." 
m = max([key[0] for key in values]) + 2 n = max([key[1] for key in values]) + 2 table = [] for i in range(m): table.append(["" for j in range(n)]) for i in range(m - 1): table[i + 1][0] = str(values[(i, 0)][0]) for j in range(n - 1): table[0][j + 1] = str(values[(0, j)][1]) for i in range(m - 1): for j in range(n - 1): value = values[(i, j)][2] if isinstance(value, float): value = "%.5g" % value table[i + 1][j + 1] = value table[0][0] = title column_sizes = [max([len(table[i][j]) for i in range(m)]) for j in range(n)] row_size = sum(column_sizes) + 3*(len(column_sizes) - 1) + 2 print "" for i in range(m): print " " + "-"*row_size print "|", for j in range(n): print table[i][j] + " "*(column_sizes[j] - len(table[i][j])), print "|", print "" print " " + "-"*row_size print "" ffc-1.6.0/cmake/000077500000000000000000000000001255571034100133375ustar00rootroot00000000000000ffc-1.6.0/cmake/templates/000077500000000000000000000000001255571034100153355ustar00rootroot00000000000000ffc-1.6.0/cmake/templates/UFCConfig.cmake.in000066400000000000000000000014071255571034100205110ustar00rootroot00000000000000# CMake configuration for UFC (http://fenicsproject.org/) # # This file has been generated automatically by the FFC/UFC installation. 
# FIXME: Check that naming conforms to CMake standards # Package found set(UFC_FOUND TRUE) # Include directories set(UFC_INCLUDE_DIRS "@INSTALL_PREFIX/include") # Compiler flags set(UFC_CXX_FLAGS "@CXX_FLAGS") # Python include directories set(UFC_PYTHON_INCLUDE_DIRS "@PYTHON_INCLUDE_DIR") # Python libraries set(UFC_PYTHON_LIBRARIES "@PYTHON_LIBRARY") # Python executable set(UFC_PYTHON_EXECUTABLE "@PYTHON_EXECUTABLE") # SWIG executable set(UFC_SWIG_EXECUTABLE "@SWIG_EXECUTABLE") # Version set(UFC_VERSION_STRING "@FULLVERSION") # The location of the UseUFC.cmake file set(UFC_USE_FILE "@INSTALL_PREFIX/share/ufc/UseUFC.cmake") ffc-1.6.0/cmake/templates/UFCConfigVersion.cmake.in000066400000000000000000000015541255571034100220620ustar00rootroot00000000000000# CMake configuration for UFC (http://fenicsproject.org/) # # This file has been generated automatically by the FFC/UFC installation. # FIXME: When should versions be defined as compatible? set(PACKAGE_VERSION "@FULLVERSION") set(PACKAGE_VERSION_MAJOR "@MAJOR") set(PACKAGE_VERSION_MINOR "@MINOR") set(PACKAGE_VERSION_PATCH "@MICRO") # This version is compatible only with matching major.minor versions. if ("${PACKAGE_VERSION_MAJOR}.${PACKAGE_VERSION_MINOR}" VERSION_EQUAL "${PACKAGE_FIND_VERSION_MAJOR}.${PACKAGE_FIND_VERSION_MINOR}") # This version is compatible with equal or lesser patch versions. if (NOT "${PACKAGE_VERSION_PATCH}" VERSION_LESS "${PACKAGE_FIND_VERSION_PATCH}") set(PACKAGE_VERSION_COMPATIBLE 1) if ("${PACKAGE_VERSION_PATCH}" VERSION_EQUAL "${PACKAGE_FIND_VERSION_PATCH}") set(PACKAGE_VERSION_EXACT 1) endif() endif() endif() ffc-1.6.0/cmake/templates/UseUFC.cmake.in000066400000000000000000000011611255571034100200350ustar00rootroot00000000000000# CMake configuration for UFC (http://fenicsproject.org/) # # This file has been generated automatically by the FFC/UFC installation. # # This file sets up include directories, link directories, and # compiler settings for a project to use UFC. 
It should not be # included directly, but rather through the UFC_USE_FILE setting # obtained from UFCConfig.cmake. if (NOT UFC_USE_FILE_INCLUDED) set(UFC_USE_FILE_INCLUDED 1) # Add include directories needed to use UFC include_directories(${UFC_INCLUDE_DIRS}) # Add compiler flags needed to use UFC set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${UFC_CXX_FLAGS}") endif() ffc-1.6.0/cmake/templates/ufc-1.pc.in000066400000000000000000000001651255571034100172030ustar00rootroot00000000000000Name: UFC Description: Unified Form-assembly Code Version: @FULLVERSION Cflags: -I@INSTALL_PREFIX/include @CXX_FLAGS ffc-1.6.0/demo/000077500000000000000000000000001255571034100132035ustar00rootroot00000000000000ffc-1.6.0/demo/AdaptivePoisson.ufl000066400000000000000000000004221255571034100170210ustar00rootroot00000000000000element = FiniteElement("Lagrange", triangle, 1) element2 = FiniteElement("Lagrange", triangle, 3) u = TrialFunction(element) v = TestFunction(element) f = Coefficient(element2) g = Coefficient(element) a = inner(grad(u), grad(v))*dx() L = f*v*dx() + g*v*ds() M = u*dx() ffc-1.6.0/demo/AlgebraOperators.ufl000066400000000000000000000025321255571034100171510ustar00rootroot00000000000000# Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Test all algebra operators on Coefficients. 
# # Compile this form with FFC: ffc AlgebraOperators.ufl element = FiniteElement("Lagrange", triangle, 1) c0 = Coefficient(element) c1 = Coefficient(element) s0 = 3*c0 - c1 p0 = c0*c1 f0 = c0/c1 integrand = 5*c0 + 5*p0 + 5*f0\ + s0*c0 + s0*p0 + s0*f0\ + 5/c0 + 5/p0 + 5/f0\ + s0/c0 + s0/p0 + s0/f0\ + s0/5 + s0/5 + s0/5\ + c0**2 + s0**2 + p0**2 + f0**2\ + c1**2.2 + s0**2.2 + p0**2.2 + f0**2.2\ + c0**c1 + s0**c0 + p0**c0 + f0**c0\ + c0**s0 + s0**p0 + p0**f0 + f0**p0\ + abs(c0) + abs(s0) + abs(p0) + abs(f0) a = integrand*dx ffc-1.6.0/demo/Biharmonic.ufl000066400000000000000000000030301255571034100157620ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (C) 2009 Kristian B. Oelgaard, Garth N. Wells and Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2009-06-26 # Last changed: 2011-03-08 # # The bilinear form a(u, v) and linear form L(v) for # Biharmonic equation in a discontinuous Galerkin (DG) # formulation. 
# # Compile this form with FFC: ffc -l dolfin Biharmonic.ufl # Elements element = FiniteElement("Lagrange", triangle, 2) # Trial and test functions u = TrialFunction(element) v = TestFunction(element) f = Coefficient(element) # Normal component, mesh size and right-hand side n = FacetNormal(triangle) h = Constant(triangle) # Parameters alpha = Constant(triangle) # Bilinear form a = inner(div(grad(u)), div(grad(v)))*dx \ - inner(jump(grad(u), n), avg(div(grad(v))))*dS \ - inner(avg(div(grad(u))), jump(grad(v), n))*dS \ + alpha('+')/h('+')*inner(jump(grad(u),n), jump(grad(v),n))*dS # Linear form L = f*v*dx ffc-1.6.0/demo/CellGeometry.ufl000066400000000000000000000022671255571034100163150ustar00rootroot00000000000000# Copyright (C) 2013 Martin S. Alnaes # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # A functional M involving a bunch of cell geometry quantities in ufl. # # Compile this form with FFC: ffc CellGeometry.ufl cell = tetrahedron V = FiniteElement("CG", cell, 1) u = Coefficient(V) # TODO: Add all geometry for all cell types to this and other demo files, need for regression test. 
x = SpatialCoordinate(cell) n = FacetNormal(cell) vol = CellVolume(cell) rad = Circumradius(cell) area = FacetArea(cell) M = u*(x[0]*vol*rad)*dx + u*(x[0]*vol*rad*area)*ds # + u*area*avg(n[0]*x[0]*vol*rad)*dS ffc-1.6.0/demo/CoefficientOperators.ufl000066400000000000000000000020061255571034100200260ustar00rootroot00000000000000# Copyright (C) 2007 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Test form for operators on Coefficients. # # Compile this form with FFC: ffc CoefficientOperators.ufl element = FiniteElement("Lagrange", triangle, 1) u = TrialFunction(element) v = TestFunction(element) f = Coefficient(element) g = Coefficient(element) a = sqrt(1/abs(1/f))*sqrt(g)*inner(grad(u), grad(v))*dx + sqrt(f*g)*g*u*v*dx ffc-1.6.0/demo/Components.ufl000066400000000000000000000020001255571034100160300ustar00rootroot00000000000000# Copyright (C) 2011 Garth N. Wells # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. 
# # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # This example demonstrates how to create vectors component-wise # # Compile this form with FFC: ffc Component.ufl element = VectorElement("Lagrange", tetrahedron, 1) v = TestFunction(element) f = Coefficient(element) # Create vector v0 = as_vector([v[0], v[1], 0.0]) # Use created vector in linear form L = dot(f, v0)*dx ffc-1.6.0/demo/Conditional.ufl000066400000000000000000000025031255571034100161560ustar00rootroot00000000000000# Copyright (C) 2010-2011 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
# # Illustration on how to use Conditional to define a source term # # Compile this form with FFC: ffc Conditional.ufl element = FiniteElement("Lagrange", triangle, 2) v = TestFunction(element) g = Constant(triangle) x = SpatialCoordinate(triangle) c0 = conditional(le( (x[0]-0.33)**2 + (x[1]-0.67)**2, 0.015), -1.0, 5.0) c = conditional( le( (x[0]-0.33)**2 + (x[1]-0.67)**2, 0.025), c0, 0.0 ) t0 = And(ge( x[0], 0.55), le(x[0], 0.95)) t1 = Or( lt( x[1], 0.05), gt(x[1], 0.45)) t2 = And(t0, Not(t1)) t = conditional(And(ge( x[1] - x[0] - 0.05 + 0.55, 0.0), t2), -1.0, 0.0) k = conditional(gt(1,0),g,g+1) f = c + t + k L = v*f*dx ffc-1.6.0/demo/Constant.ufl000066400000000000000000000020041255571034100155000ustar00rootroot00000000000000# Copyright (C) 2007 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Test form for scalar and vector constants. # # Compile this form with FFC: ffc Constant.ufl element = FiniteElement("Lagrange", triangle, 1) v = TestFunction(element) u = TrialFunction(element) f = Coefficient(element) c = Constant(triangle) d = VectorConstant(triangle) a = c*inner(grad(u), grad(v))*dx L = inner(d, grad(v))*dx ffc-1.6.0/demo/CustomIntegral.ufl000066400000000000000000000033021255571034100166510ustar00rootroot00000000000000# Copyright (C) 2014 Anders Logg # # This file is part of FFC. 
# # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2014-03-04 # Last changed: 2014-06-03 # # This demo illustrates the use of custom integrals. # # Compile this form with FFC: ffc CustomIntegral.ufl # Define element element = FiniteElement("Lagrange", triangle, 1) # Define trial and test functions and right-hand side u = TrialFunction(element) v = TestFunction(element) f = Coefficient(element) # Define facet normal and mesh size n = FacetNormal(triangle) h = 2.0*Circumradius(triangle) h = (h('+') + h('-')) / 2 # Define custom measures (FIXME: prettify this) dc0 = dc(0, metadata={"num_cells": 1}) dc1 = dc(1, metadata={"num_cells": 2}) dc2 = dc(2, metadata={"num_cells": 2}) # Define measures for integration dx = dx + dc0 # domain integral di = dc1 # interface integral do = dc2 # overlap integral # Parameters alpha = 4.0 # Bilinear form a = dot(grad(u), grad(v))*dx \ - dot(avg(grad(u)), jump(v, n))*di \ - dot(avg(grad(v)), jump(u, n))*di \ + alpha/h*jump(u)*jump(v)*di \ + dot(jump(grad(u)), jump(grad(v)))*do # Linear form L = f*v*dx ffc-1.6.0/demo/CustomMixedIntegral.ufl000066400000000000000000000040031255571034100176370ustar00rootroot00000000000000# Copyright (C) 2014 Anders Logg # # This file is part of FFC. 
# # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2014-06-10 # Last changed: 2014-06-17 # # This demo illustrates the use of custom integrals with mixed elements. # # Compile this form with FFC: ffc CustomMixedIntegral.ufl # Define element P2 = VectorElement("Lagrange", triangle, 2) P1 = FiniteElement("Lagrange", triangle, 1) TH = P2 * P1 # Define trial and test functions and right-hand side (u, p) = TrialFunctions(TH) (v, q) = TestFunctions(TH) f = Coefficient(P2) # Define facet normal and mesh size n = FacetNormal(triangle) h = 2.0*Circumradius(triangle) h = (h('+') + h('-')) / 2 # Define custom measures (FIXME: prettify this) dc0 = dc(0, metadata={"num_cells": 1}) dc1 = dc(1, metadata={"num_cells": 2}) dc2 = dc(2, metadata={"num_cells": 2}) # Define measures for integration dx = dx + dc0 # domain integral di = dc1 # interface integral do = dc2 # overlap integral # Parameters alpha = 4.0 def tensor_jump(v, n): return outer(v('+'), n('+')) + outer(v('-'), n('-')) def a_h(v, w): return inner(grad(v), grad(w))*dx \ - inner(avg(grad(v)), tensor_jump(w, n))*di \ - inner(avg(grad(w)), tensor_jump(v, n))*di def b_h(v, q): return -div(v)*q*dx + jump(v, n)*avg(q)*di def s_h(v, w): return inner(jump(grad(v)), jump(grad(w)))*do # Bilinear form a = a_h(u, v) + b_h(v, p) + b_h(u, q) + s_h(u, v) # Linear form L = dot(f, v)*dx 
ffc-1.6.0/demo/CustomVectorIntegral.ufl000066400000000000000000000035201255571034100200360ustar00rootroot00000000000000# Copyright (C) 2014 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2014-03-04 # Last changed: 2014-06-10 # # This demo illustrates the use of custom integrals with vector elements. # # Compile this form with FFC: ffc CustomVectorIntegral.ufl # Define element element = VectorElement("Lagrange", triangle, 1) # Define trial and test functions and right-hand side u = TrialFunction(element) v = TestFunction(element) f = Coefficient(element) # Define facet normal and mesh size n = FacetNormal(triangle) h = 2.0*Circumradius(triangle) h = (h('+') + h('-')) / 2 # Define custom measures (FIXME: prettify this) dc0 = dc(0, metadata={"num_cells": 1}) dc1 = dc(1, metadata={"num_cells": 2}) dc2 = dc(2, metadata={"num_cells": 2}) # Define measures for integration dx = dx + dc0 # domain integral di = dc1 # interface integral do = dc2 # overlap integral # Parameters alpha = 4.0 def tensor_jump(v, n): return outer(v('+'), n('+')) + outer(v('-'), n('-')) # Bilinear form a = inner(grad(u), grad(v))*dx \ - inner(avg(grad(u)), tensor_jump(v, n))*di \ - inner(avg(grad(v)), tensor_jump(u, n))*di \ + alpha/h*dot(jump(u), jump(v))*di \ + inner(jump(grad(u)), jump(grad(v)))*do # Linear form L = dot(f, v)*dx 
ffc-1.6.0/demo/Elasticity.ufl000066400000000000000000000020771255571034100160330ustar00rootroot00000000000000# Copyright (C) 2005 Johan Jansson # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Anders Logg 2005-2007 # Modified by Marie E. Rognes 2012 # # The bilinear form e(v) : e(u) for linear # elasticity with e(v) = 1/2 (grad(v) + grad(v)^T) # # Compile this form with FFC: ffc Elasticity.ufl element = VectorElement("Lagrange", tetrahedron, 1) u = TrialFunction(element) v = TestFunction(element) def eps(v): return sym(grad(v)) a = inner(eps(u), eps(v))*dx ffc-1.6.0/demo/EnergyNorm.ufl000066400000000000000000000017361255571034100160070ustar00rootroot00000000000000# Copyright (C) 2005-2007 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
# # This example demonstrates how to define a functional, here # the energy norm (squared) for a reaction-diffusion problem. # # Compile this form with FFC: ffc EnergyNorm.ufl element = FiniteElement("Lagrange", tetrahedron, 1) v = Coefficient(element) a = (v*v + inner(grad(v), grad(v)))*dx ffc-1.6.0/demo/Equation.ufl000066400000000000000000000031361255571034100155030ustar00rootroot00000000000000# Copyright (C) 2007 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Specification of a system F(u, v) = 0 and extraction of # the bilinear and linear forms a and L for the left- and # right-hand sides: # # F(u, v) = a(u, v) - L(v) = 0 # # The example below demonstrates the specification of the # linear system for a cG(1)/Crank-Nicholson time step for # the heat equation. # # The below formulation is equivalent to writing # # a = u*v*dx + 0.5*k*inner(grad(u), grad(v))*dx # L = u0*v*dx - 0.5*k*inner(grad(u0), grad(v))*dx # # but instead of manually shuffling terms not including # the unknown u to the right-hand side, all terms may # be listed on one line and left- and right-hand sides # extracted by lhs() and rhs(). 
# # Compile this form with FFC: ffc Equation.ufl element = FiniteElement("Lagrange", triangle, 1) k = 0.1 u = TrialFunction(element) v = TestFunction(element) u0 = Coefficient(element) eq = (u - u0)*v*dx + k*inner(grad(0.5*(u0 + u)), grad(v))*dx a = lhs(eq) L = rhs(eq) ffc-1.6.0/demo/FacetIntegrals.ufl000066400000000000000000000022171255571034100166100ustar00rootroot00000000000000# Copyright (C) 2009-2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2009-03-20 # Last changed: 2011-03-08 # # Simple example of a form defined over exterior and interior facets. # # Compile this form with FFC: ffc FacetIntegrals.ufl element = FiniteElement("Discontinuous Lagrange", triangle, 1) u = TrialFunction(element) v = TestFunction(element) h = Constant(triangle) n = VectorConstant(triangle) a = u*v*ds \ + u('+')*v('-')*dS \ + inner(jump(u, n), avg(grad(v)))*dS \ + inner(avg(grad(u)), jump(v, n))*dS ffc-1.6.0/demo/FacetRestrictionAD.ufl000066400000000000000000000017341255571034100173750ustar00rootroot00000000000000# Copyright (C) 2010 Garth N. Wells # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2010-06-07 # Last changed: 2011-07-01 # element = FiniteElement("Discontinuous Lagrange", triangle, 1) v = TestFunction(element) w = Coefficient(element) L = inner(grad(w), grad(v))*dx - dot(avg(grad(w)), avg(grad(v)))*dS u = TrialFunction(element) a = derivative(L, w, u) ffc-1.6.0/demo/Heat.ufl000066400000000000000000000023401255571034100145730ustar00rootroot00000000000000# Copyright (C) 2005-2007 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # The bilinear form a(v, u1) and linear form L(v) for # one backward Euler step with the heat equation. 
# # Compile this form with FFC: ffc Heat.ufl element = FiniteElement("Lagrange", triangle, 1) u1 = TrialFunction(element) # Value at t_n u0 = Coefficient(element) # Value at t_n-1 v = TestFunction(element) # Test function c = Coefficient(element) # Heat conductivity f = Coefficient(element) # Heat source k = Constant(triangle) # Time step a = u1*v*dx + k*c*inner(grad(u1), grad(v))*dx L = u0*v*dx + k*f*v*dx ffc-1.6.0/demo/HyperElasticity.ufl000066400000000000000000000040231255571034100170340ustar00rootroot00000000000000# Copyright (C) 2009 Harish Narayanan # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2009-09-29 # Last changed: 2011-07-01 # # The bilinear form a(u, v) and linear form L(v) for # a hyperelastic model. (Copied from dolfin/demo/pde/hyperelasticity/cpp) # # Compile this form with FFC: ffc HyperElasticity.ufl. 
# Coefficient spaces element = VectorElement("Lagrange", tetrahedron, 1) # Coefficients v = TestFunction(element) # Test function du = TrialFunction(element) # Incremental displacement u = Coefficient(element) # Displacement from previous iteration B = Coefficient(element) # Body force per unit mass T = Coefficient(element) # Traction force on the boundary # Kinematics d = len(u) I = Identity(d) # Identity tensor F = I + grad(u) # Deformation gradient C = F.T*F # Right Cauchy-Green tensor E = (C - I)/2 # Euler-Lagrange strain tensor E = variable(E) # Material constants mu = Constant(tetrahedron) # Lame's constants lmbda = Constant(tetrahedron) # Strain energy function (material model) psi = lmbda/2*(tr(E)**2) + mu*tr(E*E) S = diff(psi, E) # Second Piola-Kirchhoff stress tensor P = F*S # First Piola-Kirchoff stress tensor # The variational problem corresponding to hyperelasticity L = inner(P, grad(v))*dx - inner(B, v)*dx - inner(T, v)*ds a = derivative(L, u, du) ffc-1.6.0/demo/Mass.ufl000066400000000000000000000016051255571034100146200ustar00rootroot00000000000000# Copyright (C) 2004-2007 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # The bilinear form for a mass matrix. 
# # Compile this form with FFC: ffc Mass.ufl element = FiniteElement("Lagrange", tetrahedron, 3) v = TestFunction(element) u = TrialFunction(element) a = u*v*dx ffc-1.6.0/demo/MathFunctions.ufl000066400000000000000000000034301255571034100164750ustar00rootroot00000000000000# Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Test all algebra operators on Coefficients. # # Compile this form with FFC: ffc MathFunctions.ufl element = FiniteElement("Lagrange", triangle, 1) c0 = Coefficient(element) c1 = Coefficient(element) s0 = 3*c0 - c1 p0 = c0*c1 f0 = c0/c1 integrand = sqrt(c0) + sqrt(s0) + sqrt(p0) + sqrt(f0)\ + exp(c0) + exp(s0) + exp(p0) + exp(f0)\ + ln(c0) + ln(s0) + ln(p0) + ln(f0)\ + cos(c0) + cos(s0) + cos(p0) + cos(f0)\ + sin(c0) + sin(s0) + sin(p0) + sin(f0)\ + tan(c0) + tan(s0) + tan(p0) + tan(f0)\ + acos(c0) + acos(s0) + acos(p0) + acos(f0)\ + asin(c0) + asin(s0) + asin(p0) + asin(f0)\ + atan(c0) + atan(s0) + atan(p0) + atan(f0)\ + erf(c0) + erf(s0) + erf(p0) + erf(f0)\ + bessel_I(1, c0) + bessel_I(1, s0) + bessel_I(0, p0) + bessel_I(0, f0)\ + bessel_J(1, c0) + bessel_J(1, s0) + bessel_J(0, p0) + bessel_J(0, f0)\ + bessel_K(1, c0) + bessel_K(1, s0) + bessel_K(0, p0) + bessel_K(0, f0)\ + bessel_Y(1, c0) + bessel_Y(1, s0) + bessel_Y(0, p0) + bessel_Y(0, f0) a = integrand*dx 
ffc-1.6.0/demo/MetaData.ufl000066400000000000000000000026641255571034100154030ustar00rootroot00000000000000# Copyright (C) 2009 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Test form for metadata # # Compile this form with FFC: ffc MetaData.ufl element = FiniteElement("Lagrange", triangle, 1) u = TrialFunction(element) v = TestFunction(element) # Three terms on the same subdomain using different representations a_0 = u*v*dx(0, {"representation":"tensor"})\ + inner(grad(u), grad(v))*dx(0, {"representation": "quadrature"})\ + inner(grad(u), grad(v))*dx(0, {"representation": "auto"}) # Three terms on different subdomains using different representations and order a_1 = inner(grad(u), grad(v))*dx(0, {"representation":"tensor"}, degree=8)\ + inner(grad(u), grad(v))*dx(1, {"representation":"quadrature"}, degree=4)\ + inner(grad(u), grad(v))*dx(1, {"representation":"auto"}, degree="auto") a = a_0 + a_1 ffc-1.6.0/demo/Mini.ufl000066400000000000000000000023531255571034100146120ustar00rootroot00000000000000# Copyright (C) 2010 Marie E. Rognes # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Illustration of vector sum of elements (EnrichedElement): The # bilinear form a(u, v) for the Stokes equations using a mixed # formulation involving the Mini element. The velocity element is # composed of a P1 element augmented by the cubic bubble function. # Compile this form with FFC: ffc Mini.ufl P1 = VectorElement("Lagrange", triangle, 1) B = VectorElement("Bubble", triangle, 3) Q = FiniteElement("CG", triangle, 1) Mini = (P1 + B)*Q (u, p) = TrialFunctions(Mini) (v, q) = TestFunctions(Mini) a = (inner(grad(u), grad(v)) - div(v)*p + div(u)*q)*dx ffc-1.6.0/demo/MixedMixedElement.ufl000066400000000000000000000017201255571034100172620ustar00rootroot00000000000000# Copyright (C) 2007 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
# # A mixed element of mixed elements # # Compile this form with FFC: ffc MixedMixedElement.ufl cell = triangle DG = VectorElement("DG", cell, 0) CG = FiniteElement("Lagrange", cell, 2) RT = FiniteElement("RT", cell, 3) element = DG * CG * RT v = TestFunction(element) a = v[0]*dx ffc-1.6.0/demo/MixedPoisson.ufl000066400000000000000000000023141255571034100163340ustar00rootroot00000000000000# Copyright (C) 2006-2007 Anders Logg and Marie E. Rognes # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # The bilinear form a and linear form L for a mixed formulation of # Poisson's equation with BDM (Brezzi-Douglas-Marini) elements. # Compile this form with FFC: ffc MixedPoisson.ufl q = 1 BDM = FiniteElement("Brezzi-Douglas-Marini", triangle, q) DG = FiniteElement("Discontinuous Lagrange", triangle, q - 1) mixed_element = BDM * DG (sigma, u) = TrialFunctions(mixed_element) (tau, w) = TestFunctions(mixed_element) f = Coefficient(DG) a = (inner(sigma, tau) - div(tau)*u + div(sigma)*w)*dx L = f*w*dx ffc-1.6.0/demo/MixedPoissonDual.ufl000066400000000000000000000022521255571034100171430ustar00rootroot00000000000000# Copyright (C) 2014 Jan Blechta # # This file is part of FFC. 
# # DOLFIN is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # DOLFIN is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with DOLFIN. If not, see . # # First added: 2014-01-29 # Last changed: 2014-01-29 # # The bilinear form a(u, v) and linear form L(v) for a two-field # (mixed) formulation of Poisson's equation DRT = FiniteElement("DRT", triangle, 2) CG = FiniteElement("CG", triangle, 3) W = DRT * CG (sigma, u) = TrialFunctions(W) (tau, v) = TestFunctions(W) CG1 = FiniteElement("CG", triangle, 1) f = Coefficient(CG1) g = Coefficient(CG1) a = (dot(sigma, tau) + dot(grad(u), tau) + dot(sigma, grad(v)))*dx L = - f*v*dx - g*v*ds ffc-1.6.0/demo/NavierStokes.ufl000066400000000000000000000017761255571034100163430ustar00rootroot00000000000000# Copyright (C) 2004-2007 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # The bilinear form for the nonlinear term in the # Navier-Stokes equations with fixed convective velocity. 
# # Compile this form with FFC: ffc NavierStokes.ufl element = VectorElement("Lagrange", tetrahedron, 1) u = TrialFunction(element) v = TestFunction(element) w = Coefficient(element) a = w[j]*Dx(u[i], j)*v[i]*dx ffc-1.6.0/demo/NeumannProblem.ufl000066400000000000000000000020731255571034100166370ustar00rootroot00000000000000# Copyright (C) 2006-2007 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # The bilinear form a(u, v) and linear form L(v) for # Poisson's equation with Neumann boundary conditions. # # Compile this form with FFC: ffc NeumannProblem.ufl element = VectorElement("Lagrange", triangle, 1) u = TrialFunction(element) v = TestFunction(element) f = Coefficient(element) g = Coefficient(element) a = inner(grad(u), grad(v))*dx L = inner(f, v)*dx + inner(g, v)*ds ffc-1.6.0/demo/Normals.ufl000066400000000000000000000017771255571034100153420ustar00rootroot00000000000000# Copyright (C) 2009 Peter Brune # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # This example demonstrates how to use the facet normals # Merely project the normal onto a vector section # # Compile this form with FFC: ffc Normals.ufl cell = triangle element = VectorElement("Lagrange", cell, 1) n = FacetNormal(cell) v = TrialFunction(element) u = TestFunction(element) a = dot(v, u)*ds L = dot(n, u)*ds ffc-1.6.0/demo/Optimization.ufl000066400000000000000000000017511255571034100164050ustar00rootroot00000000000000# Copyright (C) 2004-2007 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # The bilinear form a(u, v) and linear form L(v) for # Poisson's equation. # # Compile this form with FFC: ffc -O Optimization.ufl element = FiniteElement("Lagrange", triangle, 3) u = TrialFunction(element) v = TestFunction(element) f = Coefficient(element) a = inner(grad(u), grad(v))*dx L = f*v*dx ffc-1.6.0/demo/P5tet.ufl000066400000000000000000000015271255571034100147210ustar00rootroot00000000000000# Copyright (C) 2006-2007 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # A fifth degree Lagrange finite element on a tetrahedron # # Compile this form with FFC: ffc P5tet.ufl element = FiniteElement("Lagrange", tetrahedron, 5) ffc-1.6.0/demo/P5tri.ufl000066400000000000000000000015211255571034100147150ustar00rootroot00000000000000# Copyright (C) 2006-2007 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # A fifth degree Lagrange finite element on a triangle # # Compile this form with FFC: ffc P5tri.ufl element = FiniteElement("Lagrange", triangle, 5) ffc-1.6.0/demo/PointMeasure.ufl000066400000000000000000000022131255571034100163240ustar00rootroot00000000000000# Copyright (C) 2013 Marie E. Rognes # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # This demo illustrates how to use a point measure: dP # # Compile this form with FFC: ffc PointMeasure.ufl element = FiniteElement("CG", triangle, 1) V = FiniteElement("CG", triangle, 2) u = TrialFunction(element) v = TestFunction(element) g = Coefficient(element) f = Coefficient(V) a = u*v*dP + g*g*u*v*dP(1) + u*v*dx element = FiniteElement("DG", tetrahedron, 1) V = FiniteElement("DG", tetrahedron, 2) v = TestFunction(element) f = Coefficient(V) L = v*f*dP ffc-1.6.0/demo/Poisson.ufl000066400000000000000000000017411255571034100153500ustar00rootroot00000000000000# Copyright (C) 2004-2007 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # The bilinear form a(u, v) and linear form L(v) for # Poisson's equation. 
# # Compile this form with FFC: ffc Poisson.ufl element = FiniteElement("Lagrange", triangle, 1) u = TrialFunction(element) v = TestFunction(element) f = Coefficient(element) a = inner(grad(u), grad(v))*dx L = f*v*dx ffc-1.6.0/demo/Poisson1D.ufl000066400000000000000000000017411255571034100155350ustar00rootroot00000000000000# Copyright (C) 2004-2007 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # The bilinear form a(u, v) and linear form L(v) for # Poisson's equation. # # Compile this form with FFC: ffc Poisson.ufl element = FiniteElement("Lagrange", interval, 1) u = TrialFunction(element) v = TestFunction(element) f = Coefficient(element) a = inner(grad(u), grad(v))*dx L = f*v*dx ffc-1.6.0/demo/PoissonDG.ufl000066400000000000000000000032331255571034100155610ustar00rootroot00000000000000# Copyright (C) 2006-2007 Kristian B. Oelgaard and Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. 
# # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2006-12-05 # Last changed: 2011-03-08 # # The bilinear form a(u, v) and linear form L(v) for # Poisson's equation in a discontinuous Galerkin (DG) # formulation. # # Compile this form with FFC: ffc PoissonDG.ufl # Elements element = FiniteElement("Discontinuous Lagrange", triangle, 1) # Trial and test functions u = TrialFunction(element) v = TestFunction(element) f = Coefficient(element) # Normal component, mesh size and right-hand side n = FacetNormal(triangle) h = Constant(triangle) # Compute average of mesh size h_avg = (h('+') + h('-'))/2.0 # Neumann boundary conditions gN = Coefficient(element) # Parameters alpha = 4.0 gamma = 8.0 # Bilinear form a = inner(grad(u), grad(v))*dx \ - inner(jump(u, n), avg(grad(v)))*dS \ - inner(avg(grad(u)), jump(v, n))*dS \ + alpha/h_avg*inner(jump(u, n), jump(v, n))*dS \ - inner(u*n, grad(v))*ds \ - inner(grad(u), v*n)*ds \ + gamma/h*u*v*ds # Linear form L = f*v*dx + gN*v*ds ffc-1.6.0/demo/ProjectionManifold.ufl000066400000000000000000000022501255571034100175000ustar00rootroot00000000000000# Copyright (C) 2012 Marie E. Rognes and David Ham # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
# # This demo illustrates use of finite element spaces defined over # simplicies embedded in higher dimensions # # Compile this form with FFC: ffc ProjectionManifold.ufl # Define interval embedded in 3D: domain = Cell("triangle", geometric_dimension=3) # Define element over this domain V = FiniteElement("RT", domain, 1) Q = FiniteElement("DG", domain, 0) element = V*Q (u, p) = TrialFunctions(element) (v, q) = TestFunctions(element) a = (inner(u, v) + div(u)*q + div(v)*p)*dx ffc-1.6.0/demo/QuadratureElement.ufl000066400000000000000000000025141255571034100173440ustar00rootroot00000000000000# Copyright (C) 2008 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
# # First added: 2008-03-31 # Last changed: 2014-03-04 # # The linearised bilinear form a(u, v) and linear form L(v) for # the nonlinear equation -div (1 + u) grad u = f (nonlinear Poisson) # # Compile this form with FFC: ffc QuadratureElement.ufl element = FiniteElement("Lagrange", triangle, 2) QE = FiniteElement("Quadrature", triangle, 3) sig = VectorElement("Quadrature", triangle, 3) u = TrialFunction(element) v = TestFunction(element) u0 = Coefficient(element) C = Coefficient(QE) sig0 = Coefficient(sig) f = Coefficient(element) a = C*u.dx(i)*v.dx(i)*dx + 2*u0*u0.dx(i)*u*v.dx(i)*dx L = f*v*dx - inner(sig0, grad(v))*dx ffc-1.6.0/demo/README000066400000000000000000000004461255571034100140670ustar00rootroot00000000000000To compile a form in this directory, just type ffc .ufl for example ffc Poisson.ufl To run these demos from within the source tree without needing to install FFC system-wide, update your paths according to export PATH="../scripts:$PATH" export PYTHONPATH="..:$PYTHONPATH" ffc-1.6.0/demo/ReactionDiffusion.ufl000066400000000000000000000020401255571034100173220ustar00rootroot00000000000000# Copyright (C) 2009 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # The bilinear form a(u, v) and linear form L(v) for a simple # reaction-diffusion equation using simplified tuple notation. 
# # Compile this form with FFC: ffc ReactionDiffusion.ufl element = FiniteElement("Lagrange", triangle, 1) u = TrialFunction(element) v = TestFunction(element) f = Coefficient(element) a = (inner(grad(u), grad(v)) + u*v)*dx L = f*v*dx ffc-1.6.0/demo/RestrictedElement.ufl000066400000000000000000000027571255571034100173500ustar00rootroot00000000000000# Copyright (C) 2009 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Restriction of a finite element. # The below syntax show how one can restrict a higher order Lagrange element # to only take into account those DOFs that live on the facets. # # Compile this form with FFC: ffc RestrictedElement.ufl # Restricted element CG_R = FiniteElement("Lagrange", triangle, 4)["facet"] u_r = TrialFunction(CG_R) v_r = TestFunction(CG_R) a = avg(v_r)*avg(u_r)*dS + v_r*u_r*ds #CG = FiniteElement("Lagrange", triangle, 4) #CG_R = CG["facet"] #u_r = TrialFunction(CG_R) #v_r = TestFunction(CG_R) #a = v_r('+')*u_r('+')*dS + v_r('-')*u_r('-')*dS + v_r*u_r*ds # Mixed element #CG = FiniteElement("Lagrange", triangle, 4) #CG_R = CG["facet"] #ME = CG * CG_R #u, u_r = TrialFunctions(ME) #v, v_r = TestFunctions(ME) #a = v*u*dx + v_r('+')*u_r('+')*dS + v_r('+')*u_r('+')*dS + v_r*u_r*ds ffc-1.6.0/demo/SpatialCoordinates.ufl000066400000000000000000000022611255571034100175040ustar00rootroot00000000000000# Copyright (C) 2010 Kristian B. 
Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # The bilinear form a(u, v) and linear form L(v) for # Poisson's equation where spatial coordinates are used to define the source # and boundary flux terms. # # Compile this form with FFC: ffc SpatialCoordinates.ufl element = FiniteElement("Lagrange", triangle, 2) u = TrialFunction(element) v = TestFunction(element) x = SpatialCoordinate(triangle) d_x = x[0] - 0.5 d_y = x[1] - 0.5 f = 10.0*exp(-(d_x*d_x + d_y*d_y) / 0.02) g = sin(5.0*x[0]) a = inner(grad(u), grad(v))*dx L = f*v*dx + g*v*ds ffc-1.6.0/demo/StabilisedStokes.ufl000066400000000000000000000023511255571034100171700ustar00rootroot00000000000000# Copyright (c) 2005-2007 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
# # The bilinear form a(u, v) and Linear form L(v) for the Stokes # equations using a mixed formulation (equal-order stabilized). # # Compile this form with FFC: ffc Stokes.ufl vector = VectorElement("Lagrange", triangle, 1) scalar = FiniteElement("Lagrange", triangle, 1) system = vector * scalar (u, p) = TrialFunctions(system) (v, q) = TestFunctions(system) f = Coefficient(vector) h = Coefficient(scalar) beta = 0.2 delta = beta*h*h a = (inner(grad(u), grad(v)) - div(v)*p + div(u)*q + delta*dot(grad(p), grad(q)))*dx L = dot(f, v + delta*grad(q))*dx ffc-1.6.0/demo/Stokes.ufl000066400000000000000000000021441255571034100151640ustar00rootroot00000000000000# Copyright (C) 2005-2007 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # The bilinear form a(u, v) and Linear form L(v) for the Stokes # equations using a mixed formulation (Taylor-Hood elements). # Compile this form with FFC: ffc Stokes.ufl P2 = VectorElement("Lagrange", triangle, 2) P1 = FiniteElement("Lagrange", triangle, 1) TH = P2 * P1 (u, p) = TrialFunctions(TH) (v, q) = TestFunctions(TH) f = Coefficient(P2) a = (inner(grad(u), grad(v)) - div(v)*p + div(u)*q)*dx L = inner(f, v)*dx ffc-1.6.0/demo/SubDomain.ufl000066400000000000000000000017531255571034100156020ustar00rootroot00000000000000# Copyright (C) 2008 Anders Logg # # This file is part of FFC. 
# # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # This example illustrates how to define a form over a # given subdomain of a mesh, in this case a functional. # # Compile this form with FFC: ffc SubDomain.ufl element = FiniteElement("CG", tetrahedron, 1) v = TestFunction(element) u = TrialFunction(element) f = Coefficient(element) M = f*dx(2) + f*ds(5) ffc-1.6.0/demo/SubDomains.ufl000066400000000000000000000021321255571034100157550ustar00rootroot00000000000000# Copyright (C) 2008 Anders Logg and Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # This simple example illustrates how forms can be defined on # different sub domains. It is supported for all three integral # types. 
# # Compile this form with FFC: ffc SubDomains.ufl element = FiniteElement("CG", tetrahedron, 1) u = TrialFunction(element) v = TestFunction(element) a = u*v*dx(0) + 10.0*u*v*dx(1) + u*v*ds(0) + 2.0*u*v*ds(1) + u('+')*v('+')*dS(0) + 4.3*u('+')*v('+')*dS(1) ffc-1.6.0/demo/TensorWeightedPoisson.ufl000066400000000000000000000020421255571034100202170ustar00rootroot00000000000000# Copyright (C) 2005-2007 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # The bilinear form a(u, v) for tensor-weighted Poisson's equation. # # Compile this form with FFC: ffc TensorWeightedPoisson.ufl P1 = FiniteElement("Lagrange", triangle, 1) P0 = TensorElement("Discontinuous Lagrange", triangle, 0, (2, 2)) u = TrialFunction(P1) v = TestFunction(P1) f = Coefficient(P1) C = Coefficient(P0) a = inner(C*grad(u), grad(v))*dx ffc-1.6.0/demo/TraceElement.ufl000066400000000000000000000001671255571034100162670ustar00rootroot00000000000000 element = FiniteElement("Discontinuous Lagrange Trace", "triangle", 0) v = TestFunction(element) L = v*ds + avg(v)*dS ffc-1.6.0/demo/VectorLaplaceGradCurl.ufl000066400000000000000000000032531255571034100200660ustar00rootroot00000000000000# Copyright (C) 2007 Marie Rognes # # This file is part of FFC. 
# # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # The bilinear form a(u, v) and linear form L(v) for the Hodge Laplace # problem using 0- and 1-forms. Intended to demonstrate use of N1curl # elements. # Compile this form with FFC: ffc VectorLaplaceGradCurl.ufl def HodgeLaplaceGradCurl(element, felement): """This is a formulation of the Hodge Laplacian using k=1 and n=3, i.e 0-forms and 1-forms in 3D. Appropriate elements are GRAD \times CURL = Lagrange_r \ times Ned^1_{r} Lagrange_{r+1} \ times Ned^2_{r} """ (sigma, u) = TrialFunctions(element) (tau, v) = TestFunctions(element) f = Coefficient(felement) a = (inner(sigma, tau) - inner(grad(tau), u) + inner(grad(sigma), v) + inner(curl(u), curl(v)))*dx L = inner(f, v)*dx return [a, L] shape = tetrahedron order = 1 GRAD = FiniteElement("Lagrange", shape, order) CURL = FiniteElement("N1curl", shape, order) VectorLagrange = VectorElement("Lagrange", shape, order+1) [a, L] = HodgeLaplaceGradCurl(GRAD * CURL, VectorLagrange) ffc-1.6.0/demo/VectorPoisson.ufl000066400000000000000000000017741255571034100165410ustar00rootroot00000000000000# Copyright (C) 2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # The bilinear form a(u, v) and linear form L(v) for # the vector-valued Poisson's equation. # # Compile this form with FFC: ffc VectorPoisson.ufl element = VectorElement("Lagrange", triangle, 1) u = TrialFunction(element) v = TestFunction(element) f = Coefficient(element) a = inner(grad(u), grad(v))*dx L = inner(f, v)*dx ffc-1.6.0/demo/plotelements.py000066400000000000000000000040521255571034100162710ustar00rootroot00000000000000"This program demonstrates how to plot finite elements with FFC." # Copyright (C) 2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
# # First added: 2010-12-07 # Last changed: 2010-12-10 from ufl import * from ffc import * #element = FiniteElement("Argyris", triangle, 5) #element = FiniteElement("Arnold-Winther", triangle) #element = FiniteElement("Brezzi-Douglas-Marini", triangle, 3) element = FiniteElement("Brezzi-Douglas-Marini", tetrahedron, 3) #element = FiniteElement("Crouzeix-Raviart", triangle, 1) #element = FiniteElement("Crouzeix-Raviart", tetrahedron, 1) #element = FiniteElement("Discontinuous Lagrange", triangle, 3) #element = FiniteElement("Discontinuous Lagrange", tetrahedron, 3) #element = FiniteElement("Hermite", triangle) #element = FiniteElement("Hermite", tetrahedron) #element = FiniteElement("Lagrange", triangle, 3) #element = FiniteElement("Lagrange", tetrahedron, 3) #element = FiniteElement("Mardal-Tai-Winther", triangle) #element = FiniteElement("Morley", triangle) #element = FiniteElement("Nedelec 1st kind H(curl)", triangle, 3) #element = FiniteElement("Nedelec 1st kind H(curl)", tetrahedron, 3) #element = FiniteElement("Nedelec 2nd kind H(curl)", triangle, 3) #element = FiniteElement("Nedelec 2nd kind H(curl)", tetrahedron, 1) #element = FiniteElement("Raviart-Thomas", triangle, 3) #element = FiniteElement("Raviart-Thomas", tetrahedron, 3) plot(element) #plot(element, rotate=False) #plot("notation") ffc-1.6.0/doc/000077500000000000000000000000001255571034100130245ustar00rootroot00000000000000ffc-1.6.0/doc/man/000077500000000000000000000000001255571034100135775ustar00rootroot00000000000000ffc-1.6.0/doc/man/man1/000077500000000000000000000000001255571034100144335ustar00rootroot00000000000000ffc-1.6.0/doc/man/man1/ffc.1.gz000066400000000000000000000055621255571034100157020ustar00rootroot00000000000000ANffc.1Yn#7+,esKx3uƃ'b nDElʗ"jrfXzZ=\__}#buNqk;yviO/݇{z%YnßϞO+[e77/F+WWzV&W4oZx͇"{(w}MkV^6p=`btR7 ֕{\z[YZ { UUAj9e…Ԫڊ4c7G jcɾ{ gSn]9,VEY)+ZAr?7yx9[鴠zue6hxAX};)-b^'8DI_ qhkr~XqkBUlmaj}(YўH) =G29';p8ZZ8h GD  7(2 dj:rAIaz@t1Ë=Rʴ\ <5߿Tw\`կ E&t:H?E~2q5y:3g|:,g.hs&p} }ulӚ 
hV.rZ6=vU=w~AIЃҼ#3vSa#3Ʃv<>2JSF'nʃ頸OU3EF.Zȳ\Dڵ)RSPvS-ino/ g,|'G/~?ŤϏR@S9l(|r#ǰpSokMT燓BumdAÄpFPuDdJM>-uuƹ;EP`rx &|B<9n`0?';ٞ[3n20s7^kvRSjn3%]#<"tQCI`8Rj |u{y/7rϦY(J+71)hLfcA83kxWZCǭNDk& )zǕ툦j .P"kz[NV\` qL7~􌃨#MéaP:N܍$sIpX{PL8р7*C&PԄR~$ۣ(ȶuk$l8Fhҥz8 6f;(+$"k7ϻ=Jx  p^+=BCH`6Aj~ddX#4*B$@PIeͩ&Z~8%J)wjdA8EV9K(i+'q@A >)+kb%__q,oGL; 4(uL  3]p<c:g;kmzH *tdPJ<,ڦϹZv<=420>4$-&)I :ry1 0>]ư, Jy¡(4ժ><<[Wp6C[b!ݭ)d1C;>2Y 3'˨#-gQ@)&fy'V1q- ,"mTYeNiOq"'ZBtq;sxC:ϧ#'9 :JU" » @1E yHSyX1E"ڱ"ǕO5Š) S .qp@B:@f-Yg >oeI^ݤ/@~BrjIv=bvYLJ.˔W aN\ Cq C`T,jwxαKa|RV&ohzA2I!Yvu!&3L$yռ7&K~<״^g6Ƶ_OL^|bD9)@RΗFaᎄ<*z&~&F*FH%hdݞH8<)/|&4>y=o`-$.k Uŵ#"KHEuU.䂷[0a`K ЧD;~%t\B6 T "jZ+y..@-m`CcӉ{}s06 iA3B҆VN5ڤ)Uvpχ"x*K>c[ib|mVY;TI%5' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. 
The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/FFC.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/FFC.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/FFC" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/FFC" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." make -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. 
The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." ffc-1.6.0/doc/sphinx/scripts/000077500000000000000000000000001255571034100160245ustar00rootroot00000000000000ffc-1.6.0/doc/sphinx/scripts/README_generate_modules.rst000066400000000000000000000021211255571034100231110ustar00rootroot00000000000000 Generate Modules ================ This script parses a directory tree looking for python modules and packages and creates ReST files appropriately to create code documentation with Sphinx. It also creates a modules index. Usage:: Usage: generate_modules.py [options] [exclude paths, ...] Note: By default this script will not overwrite already created files. 
Options: -h, --help show this help message and exit -n HEADER, --doc-header=HEADER Documentation Header (default=Project) -d DESTDIR, --dest-dir=DESTDIR Output destination directory -s SUFFIX, --suffix=SUFFIX module suffix (default=txt) -m MAXDEPTH, --maxdepth=MAXDEPTH Maximum depth of submodules to show in the TOC (default=4) -r, --dry-run Run the script without creating the files -f, --force Overwrite all the files -t, --no-toc Don't create the table of content file ffc-1.6.0/doc/sphinx/scripts/generate_index.py000077500000000000000000000047341255571034100213720ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (C) 2011 Marie E. Rognes # # This file is part of UFL. # # UFL is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # UFL is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with UFL. If not, see . # # First added: 2011-06-09 # Last changed: 2011-06-09 # # This is a naive utility script for adding some labels to generated # .rst and for creating a main level index. It is used by # scripts/makedoc after generating .rst # import os, sys index_template = """ ############################################# Documentation for FFC v%s ############################################# FFC Library Reference ===================== * :ref:`FFC Programmer's Reference ` (Packages and Modules. Everything.) .. 
toctree:: :hidden: :maxdepth: 1 modules """ def insert_labels(directory, filenames): """ Insert labels based on filename for those files defined by the given filenames relative to directory """ for name in filenames: filename = os.path.join(directory, name) file = open(filename) text = file.read() file.close() label = "\n.. _%s_package:\n\n" % "_".join(name.split(".")[:-1]) modded_text = label + text print("Adding label to %s" % filename) file = open(filename, "w") file.write(modded_text) file.close() def generate_index_file(output_dir, version): text = index_template % version filename = os.path.join(output_dir, "index.rst") print("Writing documentation index file to %s" % filename) file = open(filename, "w") file.write(text) file.close() def main(input_dir, version): files = ["ffc.rst", "ffc.tensor.rst", "ffc.quadrature.rst", "ffc.errorcontrol.rst"] insert_labels(input_dir, files) generate_index_file(input_dir, version) if __name__ == '__main__': if len(sys.argv) != 3: print("Usage: python generate_index.py input_directory version") exit() main(sys.argv[1], sys.argv[2]) ffc-1.6.0/doc/sphinx/scripts/generate_modules.py000077500000000000000000000237221255571034100217310ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- """ sphinx-autopackage-script This script parses a directory tree looking for python modules and packages and creates ReST files appropriately to create code documentation with Sphinx. It also creates a modules index (named modules.). """ # Copyright 2008 Société des arts technologiques (SAT), http://www.sat.qc.ca/ # Copyright 2010 Thomas Waldmann # All rights reserved. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. 
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # Modified by Marie E. Rognes (meg@simula.no), 2011 import os import optparse # automodule options OPTIONS = ['members', 'undoc-members', # 'inherited-members', # disabled because there's a bug in sphinx 'show-inheritance', ] INIT = '__init__.py' def makename(package, module): """Join package and module with a dot.""" # Both package and module can be None/empty. if package: name = package if module: name += '.' + module else: name = module return name def write_file(name, text, opts): """Write the output file for module/package .""" if opts.dryrun: return fname = os.path.join(opts.destdir, "%s.%s" % (name, opts.suffix)) if not opts.force and os.path.isfile(fname): print('File %s already exists, skipping.' % fname) else: print('Creating file %s.' % fname) f = open(fname, 'w') f.write(text) f.close() def format_heading(level, text): """Create a heading of [1, 2 or 3 supported].""" underlining = ['=', '-', '~', ][level-1] * len(text) return '%s\n%s\n\n' % (text, underlining) def format_directive(module, package=None): """Create the automodule directive and add the options.""" directive = '.. automodule:: %s\n' % makename(package, module) # MER: Treat stuff from __init__ a little differently. 
if "__init__" in module: directive += ' :%s:\n' % "noindex" else: for option in OPTIONS: directive += ' :%s:\n' % option return directive def create_module_file(package, module, opts): """Build the text of the file and write the file.""" text = format_heading(1, '%s Module' % module) text += format_heading(2, ':mod:`%s` Module' % module) text += format_directive(module, package) write_file(makename(package, module), text, opts) def create_package_file(root, master_package, subroot, py_files, opts, subs): """Build the text of the file and write the file.""" package = os.path.split(root)[-1] text = format_heading(1, '%s Package' % package) # add each package's module for py_file in py_files: if shall_skip(os.path.join(root, py_file)): continue is_package = py_file == INIT py_file = os.path.splitext(py_file)[0] py_path = makename(subroot, py_file) if is_package: heading = ':mod:`%s` Package' % package else: heading = ':mod:`%s` Module' % py_file text += format_heading(2, heading) text += format_directive(is_package and subroot or py_path, master_package) text += '\n' # build a list of directories that are packages (they contain an INIT file) subs = [sub for sub in subs if os.path.isfile(os.path.join(root, sub, INIT))] # if there are some package directories, add a TOC for theses subpackages if subs: text += format_heading(2, 'Subpackages') text += '.. toctree::\n\n' for sub in subs: text += ' %s.%s\n' % (makename(master_package, subroot), sub) text += '\n' write_file(makename(master_package, subroot), text, opts) def create_modules_toc_file(master_package, modules, opts, name='modules'): """ Create the module's index. """ text = format_heading(1, '%s Modules' % opts.header) text += '.. 
toctree::\n' text += ' :maxdepth: %s\n\n' % opts.maxdepth modules.sort() prev_module = '' for module in modules: # look if the module is a subpackage and, if yes, ignore it if module.startswith(prev_module + '.'): continue prev_module = module text += ' %s\n' % module write_file(name, text, opts) def shall_skip(module): """ Check if we want to skip this module. """ # skip it, if there is nothing (or just \n or \r\n) in the file return (os.path.getsize(module) < 3) def recurse_tree(path, excludes, opts): """ Look for every file in the directory tree and create the corresponding ReST files. """ # use absolute path for root, as relative paths like '../../foo' cause # 'if "/." in root ...' to filter out *all* modules otherwise path = os.path.abspath(path) # check if the base directory is a package and get is name if INIT in os.listdir(path): package_name = path.split(os.path.sep)[-1] else: package_name = None toc = [] tree = os.walk(path, False) for root, subs, files in tree: # keep only the Python script files py_files = sorted([f for f in files if os.path.splitext(f)[1] == '.py']) if INIT in py_files: py_files.remove(INIT) py_files.insert(0, INIT) # remove hidden ('.') and private ('_') directories subs = sorted([sub for sub in subs if sub[0] not in ['.', '_']]) # check if there are valid files to process # TODO: could add check for windows hidden files if "/." in root or "/_" in root \ or not py_files \ or is_excluded(root, excludes): continue if INIT in py_files: # we are in package ... if (# ... with subpackage(s) subs or # ... with some module(s) len(py_files) > 1 or # ... 
with a not-to-be-skipped INIT file not shall_skip(os.path.join(root, INIT)) ): subroot = root[len(path):].lstrip(os.path.sep).replace(os.path.sep, '.') create_package_file(root, package_name, subroot, py_files, opts, subs) toc.append(makename(package_name, subroot)) elif root == path: # if we are at the root level, we don't require it to be a package for py_file in py_files: if not shall_skip(os.path.join(path, py_file)): module = os.path.splitext(py_file)[0] create_module_file(package_name, module, opts) toc.append(makename(package_name, module)) # create the module's index if not opts.notoc: create_modules_toc_file(package_name, toc, opts) def normalize_excludes(rootpath, excludes): """ Normalize the excluded directory list: * must be either an absolute path or start with rootpath, * otherwise it is joined with rootpath * with trailing slash """ sep = os.path.sep f_excludes = [] for exclude in excludes: if not os.path.isabs(exclude) and not exclude.startswith(rootpath): exclude = os.path.join(rootpath, exclude) if not exclude.endswith(sep): exclude += sep f_excludes.append(exclude) return f_excludes def is_excluded(root, excludes): """ Check if the directory is in the exclude list. Note: by having trailing slashes, we avoid common prefix issues, like e.g. an exlude "foo" also accidentally excluding "foobar". """ sep = os.path.sep if not root.endswith(sep): root += sep for exclude in excludes: if root.startswith(exclude): return True return False def main(): """ Parse and check the command line arguments. """ parser = optparse.OptionParser(usage="""usage: %prog [options] [exclude paths, ...] 
Note: By default this script will not overwrite already created files.""") parser.add_option("-n", "--doc-header", action="store", dest="header", help="Documentation Header (default=Project)", default="Project") parser.add_option("-d", "--dest-dir", action="store", dest="destdir", help="Output destination directory", default="") parser.add_option("-s", "--suffix", action="store", dest="suffix", help="module suffix (default=txt)", default="txt") parser.add_option("-m", "--maxdepth", action="store", dest="maxdepth", help="Maximum depth of submodules to show in the TOC (default=4)", type="int", default=4) parser.add_option("-r", "--dry-run", action="store_true", dest="dryrun", help="Run the script without creating the files") parser.add_option("-f", "--force", action="store_true", dest="force", help="Overwrite all the files") parser.add_option("-t", "--no-toc", action="store_true", dest="notoc", help="Don't create the table of content file") (opts, args) = parser.parse_args() if not args: parser.error("package path is required.") else: rootpath, excludes = args[0], args[1:] if os.path.isdir(rootpath): # check if the output destination is a valid directory if opts.destdir and os.path.isdir(opts.destdir): excludes = normalize_excludes(rootpath, excludes) recurse_tree(rootpath, excludes, opts) else: print('%s is not a valid output destination directory.' % opts.destdir) else: print('%s is not a valid directory.' % rootpath) if __name__ == '__main__': main() ffc-1.6.0/doc/sphinx/source/000077500000000000000000000000001255571034100156355ustar00rootroot00000000000000ffc-1.6.0/doc/sphinx/source/conf.py000066400000000000000000000217631255571034100171450ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # FFC documentation build configuration file, created by # sphinx-quickstart on Wed Nov 16 23:12:01 2011. # # This file is execfile()d with the current directory set to its containing dir. 
# # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'The FEniCS Project' copyright = u'2011, FFC Core' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '' # The full version, including alpha/beta/rc tags. release = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. 
#today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'nature' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. 
They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'FFCdoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. 
List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'FFC.tex', u'FFC Documentation', u'FFC Core', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'ffc', u'FFC Documentation', [u'FFC Core'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'FFC', u'FFC Documentation', u'FFC Core', 'FFC', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # -- Options for Epub output --------------------------------------------------- # Bibliographic Dublin Core info. epub_title = u'FFC' epub_author = u'FFC Core' epub_publisher = u'FFC Core' epub_copyright = u'2011, FFC Core' # The language of the text. 
It defaults to the language option # or en if the language is not set. #epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. #epub_scheme = '' # The unique identifier of the text. This can be a ISBN number # or the project homepage. #epub_identifier = '' # A unique identification for the text. #epub_uid = '' # A tuple containing the cover image and cover page html template filenames. #epub_cover = () # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_pre_files = [] # HTML files shat should be inserted after the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_post_files = [] # A list of files that should not be packed into the epub file. #epub_exclude_files = [] # The depth of the table of contents in toc.ncx. #epub_tocdepth = 3 # Allow duplicate toc entries. #epub_tocdup = True # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {'http://docs.python.org/': None} ffc-1.6.0/doc/sphinx/source/index.rst000066400000000000000000000004731255571034100175020ustar00rootroot00000000000000 ############################################# Documentation for FFC v1.0-beta2 ############################################# FFC Library Reference ===================== * :ref:`FFC Programmer's Reference ` (Packages and Modules. Everything.) .. toctree:: :hidden: :maxdepth: 1 modules ffc-1.6.0/ffc/000077500000000000000000000000001255571034100130155ustar00rootroot00000000000000ffc-1.6.0/ffc/__init__.py000066400000000000000000000026371255571034100151360ustar00rootroot00000000000000""" FEniCS Form Compiler (FFC) -------------------------- FFC compiles finite element variational forms into C++ code. 
The interface consists of the following functions: compile_form - Compilation of forms compile_element - Compilation of finite elements jit - Just-In-Time compilation of forms and elements default_parameters - Default parameter values for FFC """ __version__ = "1.6.0" # Import compiler functions from ffc.compiler import compile_form, compile_element # Import JIT compiler from ffc.jitcompiler import jit # Import default parameters from .parameters import default_parameters # Import plotting from .plot import * # Import useful extra functionality from .extras import * # List of supported elements try: # Import list of supported elements from FIAT from FIAT import supported_elements supported_elements = list(supported_elements.keys()) supported_elements.sort() # Append elements that we can plot from .plot import element_colors supported_elements_for_plotting = list(set(supported_elements).union(set(element_colors.keys()))) supported_elements_for_plotting.sort() # Remove elements from list that we don't support or don't trust supported_elements.remove("Argyris") supported_elements.remove("Hermite") supported_elements.remove("Morley") except: supported_elements = [] supported_elements_for_plotting = [] ffc-1.6.0/ffc/analysis.py000066400000000000000000000344151255571034100152210ustar00rootroot00000000000000""" Compiler stage 1: Analysis -------------------------- This module implements the analysis/preprocessing of variational forms, including automatic selection of elements, degrees and form representation type. """ # Copyright (C) 2007-201r Anders Logg and Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Marie E. Rognes, 2010 # Modified by Martin Alnaes, 2013-2014 import os # UFL modules from ufl.common import istr, tstr from ufl.finiteelement import MixedElement, EnrichedElement from ufl.algorithms import estimate_total_polynomial_degree from ufl.algorithms import sort_elements from ufl.algorithms import compute_form_data # FFC modules from ffc.log import log, info, begin, end, warning, debug, error, ffc_assert, warning_blue from ffc.utils import all_equal from ffc.quadratureelement import default_quadrature_degree from ffc.utils import all_equal from ffc.tensor import estimate_cost def analyze_forms(forms, parameters): """ Analyze form(s), returning form_datas - a tuple of form_data objects unique_elements - a tuple of unique elements across all forms element_numbers - a mapping to unique numbers for all elements """ begin("Compiler stage 1: Analyzing form(s)") # Analyze forms form_datas = tuple(_analyze_form(form, parameters) for form in forms) # Extract unique elements accross all forms unique_elements = [] for form_data in form_datas: for element in form_data.unique_sub_elements: if not element in unique_elements: unique_elements.append(element) # Sort elements unique_elements = sort_elements(unique_elements) # Compute element numbers element_numbers = _compute_element_numbers(unique_elements) end() return form_datas, unique_elements, element_numbers def analyze_elements(elements, parameters): begin("Compiler stage 1: Analyzing form(s)") # Extract unique elements unique_elements = [] element_numbers = {} for element in elements: # Get all (unique) nested elements. 
for e in _get_nested_elements(element): # Check if element is present if not e in element_numbers: element_numbers[e] = len(unique_elements) unique_elements.append(e) # Sort elements unique_elements = sort_elements(unique_elements) # Build element map element_numbers = _compute_element_numbers(unique_elements) # Update scheme for QuadratureElements scheme = parameters["quadrature_rule"] if scheme == "auto": scheme = "default" for element in unique_elements: if element.family() == "Quadrature": element._quad_scheme = scheme end() return (), unique_elements, element_numbers def _compute_element_numbers(elements): "Build map from elements to element numbers." element_numbers = {} for (i, element) in enumerate(elements): element_numbers[element] = i return element_numbers def _get_nested_elements(element): "Get unique nested elements (including self)." nested_elements = [element] for e in element.sub_elements(): nested_elements += _get_nested_elements(e) return set(nested_elements) def _analyze_form(form, parameters): "Analyze form, returning form data." # Check that form is not empty ffc_assert(not form.empty(), "Form (%s) seems to be zero: cannot compile it." % str(form)) # Compute form metadata form_data = compute_form_data(form) info("") info(str(form_data)) # Attach integral meta data _attach_integral_metadata(form_data, parameters) return form_data def _attach_integral_metadata(form_data, parameters): "Attach integral metadata" # Recognized metadata keys metadata_keys = ("representation", "quadrature_degree", "quadrature_rule") # Iterate over integral collections quad_schemes = [] for ida in form_data.integral_data: # TODO: Is it possible to detach this from IntegralData? It's a bit strange from the ufl side. common_metadata = ida.metadata # Iterate over integrals integral_metadatas = [] for integral in ida.integrals: # Fill in integral metadata with default values # NB! This modifies the metadata of the input integral data! 
integral_metadata = integral.metadata() or {} for key in metadata_keys: if key not in integral_metadata: integral_metadata[key] = parameters[key] # Automatic selection of representation r = integral_metadata["representation"] # Hack to override representation with environment variable forced_r = os.environ.get("FFC_FORCE_REPRESENTATION") if forced_r: r = forced_r info("representation: forced --> %s" % r) elif r == "auto": r = _auto_select_representation(integral, form_data.unique_sub_elements, form_data.function_replace_map) info("representation: auto --> %s" % r) elif r in ("quadrature", "tensor", "uflacs"): info("representation: %s" % r) else: info("Valid choices are 'tensor', 'quadrature', 'uflacs', or 'auto'.") error("Illegal choice of representation for integral: " + str(r)) integral_metadata["representation"] = r # Automatic selection of quadrature degree qd = integral_metadata["quadrature_degree"] # Special case: handling -1 as "auto" for quadrature_degree if qd in ("auto", -1): qd = _auto_select_quadrature_degree(integral.integrand(), r, form_data.unique_sub_elements, form_data.element_replace_map) info("quadrature_degree: auto --> %d" % qd) else: qd = int(qd) info("quadrature_degree: %d" % qd) # Validate degree if not qd >= 0: info("Valid choices are nonnegative integers or 'auto'.") error("Illegal quadrature degree for integral: " + str(qd)) tdim = integral.domain().topological_dimension() _check_quadrature_degree(qd, tdim) integral_metadata["quadrature_degree"] = qd assert isinstance(qd, int) # Automatic selection of quadrature rule qr = integral_metadata["quadrature_rule"] if qr == "auto": # Just use default for now. 
qr = "default" info("quadrature_rule: auto --> %s" % qr) elif qr in ("default", "canonical", "vertex"): info("quadrature_rule: %s" % qr) else: info("Valid choices are 'default', 'canonical', 'vertex', and 'auto'.") error("Illegal choice of quadrature rule for integral: " + str(qr)) integral_metadata["quadrature_rule"] = qr quad_schemes.append(qr) # Append to list of metadata integral_metadatas.append(integral_metadata) # Extract common metadata for integral collection if len(ida.integrals) == 1: common_metadata.update(integral_metadatas[0]) else: # Check that representation is the same # (Generating code with different representations within a # single tabulate_tensor is considered not worth the effort) representations = [md["representation"] for md in integral_metadatas] if all_equal(representations): r = representations[0] else: r = "quadrature" info("Integral representation must be equal within each sub domain, using %s representation." % r) # Check that quadrature degree is the same # FIXME: Why must the degree within a sub domain be the same? # This makes no sense considering that num_points is # used as a key all over in quadrature representation... quadrature_degrees = [md["quadrature_degree"] for md in integral_metadatas] if all_equal(quadrature_degrees): qd = quadrature_degrees[0] else: qd = max(quadrature_degrees) info("Quadrature degree must be equal within each sub domain, using degree %d." % qd) assert isinstance(qd, int) # Check that quadrature rule is the same # FIXME: Why must the rule within a sub domain be the same? # To support this would be more work since num_points is used # to identify quadrature rules in the quadrature representation. quadrature_rules = [md["quadrature_rule"] for md in integral_metadatas] if all_equal(quadrature_rules): qr = quadrature_rules[0] else: qr = "canonical" info("Quadrature rule must be equal within each sub domain, using %s rule." 
% qr) # Update common metadata assert isinstance(qd, int) common_metadata["representation"] = r common_metadata["quadrature_degree"] = qd common_metadata["quadrature_rule"] = qr # Update scheme for QuadratureElements if quad_schemes and all_equal(quad_schemes): scheme = quad_schemes[0] else: scheme = "canonical" info("Quadrature rule must be equal within each sub domain, using %s rule." % scheme) # FIXME: This modifies the elements depending on the form compiler parameters, # this is a serious breach of the immutability of ufl objects, since the # element quad scheme is part of the signature and hash of the element... for element in form_data.sub_elements: if element.family() == "Quadrature": element._quad_scheme = scheme def _get_sub_elements(element): "Get sub elements." sub_elements = [element] if isinstance(element, MixedElement): for e in element.sub_elements(): sub_elements += _get_sub_elements(e) elif isinstance(element, EnrichedElement): for e in element._elements: sub_elements += _get_sub_elements(e) return sub_elements def _auto_select_representation(integral, elements, function_replace_map): """ Automatically select a suitable representation for integral. Note that the selection is made for each integral, not for each term. This means that terms which are grouped by UFL into the same integral (if their measures are equal) will necessarily get the same representation. """ # Skip unsupported integration domain types if integral.integral_type() == "vertex": return "quadrature" # Get ALL sub elements, needed to check for restrictions of EnrichedElements. sub_elements = [] for e in elements: sub_elements += _get_sub_elements(e) # Use quadrature representation if we have a quadrature element if len([e for e in sub_elements if e.family() == "Quadrature"]): return "quadrature" # Use quadrature representation if any elements are restricted to # ufl.Measure. This is used when integrals are computed over discontinuities. 
#if len([e for e in sub_elements if isinstance(e, ufl.RestrictedElement) and isinstance(e.cell_restriction(), Measure)]): # return "quadrature" # Estimate cost of tensor representation tensor_cost = estimate_cost(integral, function_replace_map) debug("Estimated cost of tensor representation: " + str(tensor_cost)) # Use quadrature if tensor representation is not possible if tensor_cost == -1: return "quadrature" # Otherwise, select quadrature when cost is high if tensor_cost <= 3: return "tensor" else: return "quadrature" def _auto_select_quadrature_degree(integrand, representation, elements, element_replace_map): "Automatically select a suitable quadrature degree for integrand." # TODO: Move this to form preprocessing, as part of integral_data? # Use quadrature element degree if any is found quadrature_degrees = [e.degree() for e in elements if e.family() == "Quadrature"] if quadrature_degrees: debug("Found quadrature element(s) with the following degree(s): " + str(quadrature_degrees)) ffc_assert(min(quadrature_degrees) == max(quadrature_degrees), \ "All QuadratureElements in an integrand must have the same degree: %s" \ % str(quadrature_degrees)) debug("Selecting quadrature degree based on quadrature element: " + str(quadrature_degrees[0])) ffc_assert(representation != "tensor", "Tensor representation does not support quadrature elements.") return quadrature_degrees[0] # Otherwise estimate total degree of integrand q = estimate_total_polynomial_degree(integrand, default_quadrature_degree, element_replace_map) debug("Selecting quadrature degree based on total polynomial degree of integrand: " + str(q)) return q def _check_quadrature_degree(degree, top_dim): """Check that quadrature degree does not result in a unreasonable high number of integration points.""" num_points = ((degree + 1 + 1) // 2)**top_dim if num_points >= 100: warning_blue("WARNING: The number of integration points for each cell will be: %d" % num_points) warning_blue(" Consider using the option 
'quadrature_degree' to reduce the number of points") ffc-1.6.0/ffc/backends/000077500000000000000000000000001255571034100145675ustar00rootroot00000000000000ffc-1.6.0/ffc/backends/__init__.py000066400000000000000000000000001255571034100166660ustar00rootroot00000000000000ffc-1.6.0/ffc/backends/dolfin/000077500000000000000000000000001255571034100160425ustar00rootroot00000000000000ffc-1.6.0/ffc/backends/dolfin/__init__.py000066400000000000000000000000001255571034100201410ustar00rootroot00000000000000ffc-1.6.0/ffc/backends/dolfin/capsules.py000066400000000000000000000074501255571034100202410ustar00rootroot00000000000000# Copyright (C) 2008-2009 Martin Sandve Alnes # # This file is part of DOLFIN. # # DOLFIN is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # DOLFIN is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with DOLFIN. If not, see . # # Modified by Marie E. Rognes # # Last changed: 2011-02-21 class UFCFormNames: "Encapsulation of the names related to a generated UFC form." def __init__(self, name, coefficient_names, ufc_form_classname, ufc_finite_element_classnames, ufc_dofmap_classnames, superclassname='Form'): """Arguments: @param name: Name of form (e.g. 'a', 'L', 'M'). @param coefficient_names: List of names of form coefficients (e.g. 'f', 'g'). @param ufc_form_classname: Name of ufc::form subclass. @param ufc_finite_element_classnames: List of names of ufc::finite_element subclasses (length rank + num_coefficients). 
@param ufc_dofmap_classnames: List of names of ufc::dofmap subclasses (length rank + num_coefficients). @param superclassname (optional): Name of dolfin super class (defaults to 'Form') """ assert len(coefficient_names) <= len(ufc_dofmap_classnames) assert len(ufc_finite_element_classnames) == len(ufc_dofmap_classnames) self.num_coefficients = len(coefficient_names) self.rank = len(ufc_finite_element_classnames) - self.num_coefficients self.name = name self.coefficient_names = coefficient_names self.ufc_form_classname = ufc_form_classname self.ufc_finite_element_classnames = ufc_finite_element_classnames self.ufc_dofmap_classnames = ufc_dofmap_classnames self.superclassname = superclassname def __str__(self): s = "UFCFormNames instance:\n" s += "rank: %d\n" % self.rank s += "num_coefficients: %d\n" % self.num_coefficients s += "name: %s\n" % self.name s += "coefficient_names: %s\n" % str(self.coefficient_names) s += "ufc_form_classname: %s\n" % str(self.ufc_form_classname) s += "finite_element_classnames: %s\n" % str(self.ufc_finite_element_classnames) s += "ufc_dofmap_classnames: %s\n" % str(self.ufc_dofmap_classnames) return s class UFCElementNames: "Encapsulation of the names related to a generated UFC element." def __init__(self, name, ufc_finite_element_classnames, ufc_dofmap_classnames): """Arguments: """ assert len(ufc_finite_element_classnames) == len(ufc_dofmap_classnames) self.name = name self.ufc_finite_element_classnames = ufc_finite_element_classnames self.ufc_dofmap_classnames = ufc_dofmap_classnames def __str__(self): s = "UFCFiniteElementNames instance:\n" s += "name: %s\n" \ % self.name s += "finite_element_classnames: %s\n" \ % str(self.ufc_finite_element_classnames) s += "ufc_dofmap_classnames: %s\n" \ % str(self.ufc_dofmap_classnames) return s ffc-1.6.0/ffc/backends/dolfin/form.py000066400000000000000000000213071255571034100173620ustar00rootroot00000000000000# Copyright (C) 2011 Marie E. Rognes # # This file is part of DOLFIN. 
# # DOLFIN is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # DOLFIN is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with DOLFIN. If not, see . # # Based on original implementation by Martin Alnes and Anders Logg # # Modified by Anders Logg 2011 # # Last changed: 2011-11-15 from .includes import snippets from .functionspace import * from .goalfunctional import generate_update_ec __all__ = ["generate_form"] #------------------------------------------------------------------------------- def generate_form(form, classname): """Generate dolfin wrapper code associated with a form including code for function spaces used in form and typedefs @param form: A UFCFormNames instance @param classname Name of Form class. """ # Generate code for Form_x_FunctionSpace_y subclasses wrap = apply_function_space_template blocks = [wrap("%s_FunctionSpace_%d" % (classname, i), form.ufc_finite_element_classnames[i], form.ufc_dofmap_classnames[i]) for i in range(form.rank)] # Add typedefs CoefficientSpace_z -> Form_x_FunctionSpace_y blocks += ["typedef CoefficientSpace_%s %s_FunctionSpace_%d;\n" % (form.coefficient_names[i], classname, form.rank + i) for i in range(form.num_coefficients)] # Generate Form subclass blocks += [generate_form_class(form, classname)] # Return code return "\n".join(blocks) #------------------------------------------------------------------------------- def generate_form_class(form, classname): "Generate dolfin wrapper code for a single Form class." 
# Generate constructors constructors = generate_form_constructors(form, classname) # Generate data for coefficient assignments (number, name) = generate_coefficient_map_data(form) # Generate typedefs for FunctionSpace subclasses for Coefficients typedefs = [" // Typedefs", generate_typedefs(form, classname), ""] # Member variables for coefficients members = [" dolfin::CoefficientAssigner %s;" % coefficient for coefficient in form.coefficient_names] if form.superclassname == "GoalFunctional": members += [generate_update_ec(form)] # Group typedefs and members together for inserting into template additionals = "\n".join(typedefs + [" // Coefficients"] + members) # Wrap functions in class body code = apply_form_template(classname, constructors, number, name, additionals, form.superclassname) # Return code return code #------------------------------------------------------------------------------- def generate_coefficient_map_data(form): """Generate data for code for the functions Form::coefficient_number and Form::coefficient_name.""" # Write error if no coefficients if form.num_coefficients == 0: message = '''\ dolfin::dolfin_error("generated code for class %s", "access coefficient data", "There are no coefficients");''' % form.superclassname num = "\n %s\n return 0;" % message name = '\n %s\n return "unnamed";' % message return (num, name) # Otherwise create switch ifstr = "if " num = "" name = ' switch (i)\n {\n' for i, coeff in enumerate(form.coefficient_names): num += ' %s(name == "%s")\n return %d;\n' % (ifstr, coeff, i) name += ' case %d:\n return "%s";\n' % (i, coeff) ifstr = 'else if ' # Create final return message = '''\ dolfin::dolfin_error("generated code for class %s", "access coefficient data", "Invalid coefficient");''' % form.superclassname num += "\n %s\n return 0;" % message name += ' }\n\n %s\n return "unnamed";' % message return (num, name) #------------------------------------------------------------------------------- def 
generate_form_constructors(form, classname): """Generate the dolfin::Form constructors for different combinations of references/shared pointers etc.""" coeffs = ("referenced_coefficient", "shared_ptr_coefficient") spaces = ("referenced_space", "shared_ptr_space") # Treat functionals a little special if form.rank == 0: spaces = ("referenced_mesh", "shared_ptr_mesh") # Generate permutations of constructors constructors = [] for space in spaces: constructors += [generate_constructor(form, classname, space)] if form.num_coefficients > 0: constructors += [generate_constructor(form, classname, space, coeff) for coeff in coeffs] # Return joint constructor code return "\n\n".join(constructors) #------------------------------------------------------------------------------- def generate_constructor(form, classname, space_tag, coefficient_tag=None): "Generate a single Form constructor according to the given parameters." # Extract correct code snippets (argument, assign) = snippets[space_tag] # Construct list of arguments and function space assignments name = "V%d" if form.rank > 0: arguments = [argument % (name % i) for i in reversed(range(form.rank))] assignments = [assign % (i, name % i) for i in range(form.rank)] else: arguments = [argument] assignments = [assign] # Add coefficients to argument/assignment lists if specified if coefficient_tag is not None: (argument, assign) = snippets[coefficient_tag] arguments += [argument % name for name in form.coefficient_names] if form.rank > 0: # FIXME: To match old generated code only assignments += [""] assignments += [assign %(name, name) for name in form.coefficient_names] # Add assignment of _ufc_form variable line = "\n _ufc_form = std::shared_ptr(new %s());" # FIXME: To match old generated code only if form.rank == 0 and coefficient_tag is None: line = " _ufc_form = std::shared_ptr(new %s());" assignments += [line % form.ufc_form_classname] # Construct list for initialization of Coefficient references initializers = 
["%s(*this, %d)" % (name, number) for (number, name) in enumerate(form.coefficient_names)] # Join lists together arguments = ", ".join(arguments) initializers = ", " + ", ".join(initializers) if initializers else "" body = "\n".join(assignments) # Wrap code into template args = {"classname": classname, "rank": form.rank, "num_coefficients": form.num_coefficients, "arguments": arguments, "initializers": initializers, "body": body, "superclass": form.superclassname } code = form_constructor_template % args return code #------------------------------------------------------------------------------- form_class_template = """\ class %(classname)s: public dolfin::%(superclass)s { public: %(constructors)s // Destructor ~%(classname)s() {} /// Return the number of the coefficient with this name virtual std::size_t coefficient_number(const std::string& name) const { %(coefficient_number)s } /// Return the name of the coefficient with this number virtual std::string coefficient_name(std::size_t i) const { %(coefficient_name)s } %(members)s }; """ #------------------------------------------------------------------------------- # Template code for Form constructor form_constructor_template = """\ // Constructor %(classname)s(%(arguments)s): dolfin::%(superclass)s(%(rank)d, %(num_coefficients)d)%(initializers)s { %(body)s }""" #------------------------------------------------------------------------------- def apply_form_template(classname, constructors, number, name, members, superclass): args = {"classname": classname, "superclass": superclass, "constructors": constructors, "coefficient_number": number, "coefficient_name": name, "members": members} return form_class_template % args #------------------------------------------------------------------------------- ffc-1.6.0/ffc/backends/dolfin/functionspace.py000066400000000000000000000131061255571034100212560ustar00rootroot00000000000000# Copyright (C) 2011 Marie E. Rognes # # This file is part of DOLFIN. 
# # DOLFIN is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # DOLFIN is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with DOLFIN. If not, see . # # Based on original implementation by Martin Alnes and Anders Logg # # Last changed: 2012-11-14 from .includes import snippets __all__ = ["apply_function_space_template", "extract_coefficient_spaces", "generate_typedefs"] #------------------------------------------------------------------------------- def extract_coefficient_spaces(forms): """Extract a list of tuples (classname, finite_element_classname, dofmap_classname) for the coefficient spaces in the set of given forms. 
This can then be used for input to the function space template.""" # Extract data for each coefficient space spaces = {} for form in forms: for (i, name) in enumerate(form.coefficient_names): # Skip if already considered if name in spaces: continue # Map element name, dof map name etc to this coefficient spaces[name] = ("CoefficientSpace_%s" % name, form.ufc_finite_element_classnames[form.rank + i], form.ufc_dofmap_classnames[form.rank + i]) # Return coefficient spaces sorted alphabetically by coefficient # name names = list(spaces.keys()) names.sort() return [spaces[name] for name in names] #------------------------------------------------------------------------------- def generate_typedefs(form, classname): """Generate typedefs for test, trial and coefficient spaces relative to a function space.""" # Generate typedef data for test/trial spaces pairs = [("%s_FunctionSpace_%d" % (classname, i), snippets["functionspace"][i]) for i in range(form.rank)] # Generate typedefs for coefficient spaces pairs += [("%s_FunctionSpace_%d" % (classname, form.rank + i), "CoefficientSpace_%s" % form.coefficient_names[i]) for i in range(form.num_coefficients)] # Combine data to typedef code code = "\n".join(" typedef %s %s;" % (to, fro) for (to, fro) in pairs) return code #------------------------------------------------------------------------------- function_space_template = """\ class %(classname)s: public dolfin::FunctionSpace { public: //--- Constructors for standard function space, 2 different versions --- // Create standard function space (reference version) %(classname)s(const dolfin::Mesh& mesh): dolfin::FunctionSpace(dolfin::reference_to_no_delete_pointer(mesh), std::shared_ptr(new dolfin::FiniteElement(std::shared_ptr(new %(ufc_finite_element_classname)s()))), std::shared_ptr(new dolfin::DofMap(std::shared_ptr(new %(ufc_dofmap_classname)s()), mesh))) { // Do nothing } // Create standard function space (shared pointer version) %(classname)s(std::shared_ptr mesh): 
dolfin::FunctionSpace(mesh, std::shared_ptr(new dolfin::FiniteElement(std::shared_ptr(new %(ufc_finite_element_classname)s()))), std::shared_ptr(new dolfin::DofMap(std::shared_ptr(new %(ufc_dofmap_classname)s()), *mesh))) { // Do nothing } //--- Constructors for constrained function space, 2 different versions --- // Create standard function space (reference version) %(classname)s(const dolfin::Mesh& mesh, const dolfin::SubDomain& constrained_domain): dolfin::FunctionSpace(dolfin::reference_to_no_delete_pointer(mesh), std::shared_ptr(new dolfin::FiniteElement(std::shared_ptr(new %(ufc_finite_element_classname)s()))), std::shared_ptr(new dolfin::DofMap(std::shared_ptr(new %(ufc_dofmap_classname)s()), mesh, dolfin::reference_to_no_delete_pointer(constrained_domain)))) { // Do nothing } // Create standard function space (shared pointer version) %(classname)s(std::shared_ptr mesh, std::shared_ptr constrained_domain): dolfin::FunctionSpace(mesh, std::shared_ptr(new dolfin::FiniteElement(std::shared_ptr(new %(ufc_finite_element_classname)s()))), std::shared_ptr(new dolfin::DofMap(std::shared_ptr(new %(ufc_dofmap_classname)s()), *mesh, constrained_domain))) { // Do nothing } }; """ #------------------------------------------------------------------------------- def apply_function_space_template(name, element_name, dofmap_name): args = {"classname": name, "ufc_finite_element_classname": element_name, "ufc_dofmap_classname": dofmap_name } return function_space_template % args ffc-1.6.0/ffc/backends/dolfin/goalfunctional.py000066400000000000000000000171611255571034100214270ustar00rootroot00000000000000# Copyright (C) 2010 Marie E. Rognes # # This file is part of DOLFIN. # # DOLFIN is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # DOLFIN is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with DOLFIN. If not, see . # # Last changed: 2011-07-06 __all__ = ["generate_update_ec"] #------------------------------------------------------------------------------- attach_coefficient_template = """ // Attach coefficients from %(from)s to %(to)s for (std::size_t i = 0; i < %(from)s.num_coefficients(); i++) { name = %(from)s.coefficient_name(i); // Don't attach discrete primal solution here (not computed). if (name == "__discrete_primal_solution") continue; // Test whether %(to)s has coefficient named 'name' try { %(to)s->coefficient_number(name); } catch (...) { std::cout << "Attaching coefficient named: " << name << " to %(to)s"; std::cout << " failed! But this might be expected." 
<< std::endl; continue; } %(to)s->set_coefficient(name, %(from)s.coefficient(i)); } """ #------------------------------------------------------------------------------- attach_domains_template = """ // Attach subdomains from %(from)s to %(to)s %(to)s->dx = %(from)s.cell_domains(); %(to)s->ds = %(from)s.exterior_facet_domains(); %(to)s->dS = %(from)s.interior_facet_domains(); """ #------------------------------------------------------------------------------- update_ec_template = """ /// Initialize all error control forms, attach coefficients and /// (re-)set error control virtual void update_ec(const dolfin::Form& a, const dolfin::Form& L) { // This stuff is created here and shipped elsewhere std::shared_ptr a_star; // Dual lhs std::shared_ptr L_star; // Dual rhs std::shared_ptr V_Ez_h; // Extrapolation space std::shared_ptr Ez_h; // Extrapolated dual std::shared_ptr residual; // Residual (as functional) std::shared_ptr V_R_T; // Trial space for cell residual std::shared_ptr a_R_T; // Cell residual lhs std::shared_ptr L_R_T; // Cell residual rhs std::shared_ptr V_b_T; // Function space for cell bubble std::shared_ptr b_T; // Cell bubble std::shared_ptr V_R_dT; // Trial space for facet residual std::shared_ptr a_R_dT; // Facet residual lhs std::shared_ptr L_R_dT; // Facet residual rhs std::shared_ptr V_b_e; // Function space for cell cone std::shared_ptr b_e; // Cell cone std::shared_ptr V_eta_T; // Function space for indicators std::shared_ptr eta_T; // Indicator form // Some handy views const dolfin::FunctionSpace& Vhat(*(a.function_space(0))); // Primal test const dolfin::FunctionSpace& V(*(a.function_space(1))); // Primal trial assert(V.mesh()); const dolfin::Mesh& mesh(*V.mesh()); std::string name; // Initialize dual forms a_star.reset(new %(a_star)s(V, Vhat)); L_star.reset(new %(L_star)s(V)); %(attach_a_star)s %(attach_L_star)s // Initialize residual residual.reset(new %(residual)s(mesh)); %(attach_residual)s // Initialize extrapolation space and (fake) 
extrapolation V_Ez_h.reset(new %(V_Ez_h)s(mesh)); Ez_h.reset(new dolfin::Function(V_Ez_h)); residual->set_coefficient("__improved_dual", Ez_h); // Create bilinear and linear form for computing cell residual R_T V_R_T.reset(new %(V_R_T)s(mesh)); a_R_T.reset(new %(a_R_T)s(V_R_T, V_R_T)); L_R_T.reset(new %(L_R_T)s(V_R_T)); // Initialize bubble and attach to a_R_T and L_R_T V_b_T.reset(new %(V_b_T)s(mesh)); b_T.reset(new dolfin::Function(V_b_T)); *b_T->vector() = 1.0; %(attach_L_R_T)s // Attach bubble function to _a_R_T and _L_R_T a_R_T->set_coefficient("__cell_bubble", b_T); L_R_T->set_coefficient("__cell_bubble", b_T); // Create bilinear and linear form for computing facet residual R_dT V_R_dT.reset(new %(V_R_dT)s(mesh)); a_R_dT.reset(new %(a_R_dT)s(V_R_dT, V_R_dT)); L_R_dT.reset(new %(L_R_dT)s(V_R_dT)); %(attach_L_R_dT)s // Initialize (fake) cone and attach to a_R_dT and L_R_dT V_b_e.reset(new %(V_b_e)s(mesh)); b_e.reset(new dolfin::Function(V_b_e)); a_R_dT->set_coefficient("__cell_cone", b_e); L_R_dT->set_coefficient("__cell_cone", b_e); // Create error indicator form V_eta_T.reset(new %(V_eta_T)s(mesh)); eta_T.reset(new %(eta_T)s(V_eta_T)); // Update error control _ec.reset(new dolfin::ErrorControl(a_star, L_star, residual, a_R_T, L_R_T, a_R_dT, L_R_dT, eta_T, %(linear)s)); } """ #------------------------------------------------------------------------------- def _attach(tos, froms): if not isinstance(froms, tuple): return attach_coefficient_template % {"to": tos, "from": froms} \ + attach_domains_template % {"to": tos, "from": froms} # NB: If multiple forms involved, attach domains from last form. 
coeffs = "\n".join([attach_coefficient_template % {"to": to, "from": fro} for (to, fro) in zip(tos, froms)]) domains = attach_domains_template % {"to": tos[-1], "from": froms[-1]} return coeffs + domains #------------------------------------------------------------------------------- def generate_maps(linear): """ NB: This depends on the ordering of the forms """ maps = {"a_star": "Form_%d" % 0, "L_star": "Form_%d" % 1, "residual": "Form_%d" % 2, "a_R_T": "Form_%d" % 3, "L_R_T": "Form_%d" % 4, "a_R_dT": "Form_%d" % 5, "L_R_dT": "Form_%d" % 6, "eta_T": "Form_%d" % 7, "V_Ez_h": "CoefficientSpace_%s" % "__improved_dual", "V_R_T": "Form_%d::TestSpace" % 4, "V_b_T": "CoefficientSpace_%s" % "__cell_bubble", "V_R_dT": "Form_%d::TestSpace" % 6, "V_b_e": "CoefficientSpace_%s" % "__cell_cone", "V_eta_T": "Form_%d::TestSpace" % 7, "attach_a_star": _attach("a_star", "a"), "attach_L_star": _attach("L_star", "(*this)"), "attach_residual": _attach(("residual",)*2, ("a", "L")), "attach_L_R_T": _attach(("L_R_T",)*2, ("a", "L")), "attach_L_R_dT": _attach(("L_R_dT",)*2, ("a", "L")), "linear": "true" if linear else "false" } return maps #------------------------------------------------------------------------------- def generate_update_ec(form): linear = "__discrete_primal_solution" in form.coefficient_names maps = generate_maps(linear) code = update_ec_template % maps return code ffc-1.6.0/ffc/backends/dolfin/includes.py000066400000000000000000000033701255571034100202250ustar00rootroot00000000000000 # Based on original implementation by Martin Alnes and Anders Logg __all__ = ["dolfin_tag", "stl_includes", "dolfin_includes", "snippets"] dolfin_tag = "// DOLFIN wrappers" stl_includes = """\ // Standard library includes #include """ dolfin_includes = """\ // DOLFIN includes #include #include #include #include #include #include #include #include #include """ #------------------------------------------------------------------------------- snippets = {"shared_ptr_space": ("std::shared_ptr 
%s", " _function_spaces[%d] = %s;"), "referenced_space": ("const dolfin::FunctionSpace& %s", " _function_spaces[%d] = reference_to_no_delete_pointer(%s);"), "shared_ptr_mesh": ("std::shared_ptr mesh", " _mesh = mesh;"), "referenced_mesh": ("const dolfin::Mesh& mesh", " _mesh = reference_to_no_delete_pointer(mesh);"), "shared_ptr_coefficient": ("std::shared_ptr %s", " this->%s = *%s;"), "referenced_coefficient": ("const dolfin::GenericFunction& %s", " this->%s = %s;"), "functionspace": ("TestSpace", "TrialSpace") } #------------------------------------------------------------------------------- ffc-1.6.0/ffc/backends/dolfin/wrappers.py000066400000000000000000000135211255571034100202610ustar00rootroot00000000000000# Copyright (C) 2011 Marie E. Rognes # # This file is part of DOLFIN. # # DOLFIN is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # DOLFIN is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with DOLFIN. If not, see . # # Based on original implementation by Martin Alnes and Anders Logg # # Last changed: 2012-12-05 from . import includes as incl from .functionspace import * from .form import generate_form from .capsules import UFCElementNames __all__ = ["generate_dolfin_code"] # NB: generate_dolfin_namespace(...) 
assumes that if a coefficient has # the same name in multiple forms, it is indeed the same coefficient: parameters = {"use_common_coefficient_names": True} #------------------------------------------------------------------------------- def generate_dolfin_code(prefix, header, forms, common_function_space=False, add_guards=False, error_control=False): """Generate complete dolfin wrapper code with given generated names. @param prefix: String, prefix for all form names. @param header: Code that will be inserted at the top of the file. @param forms: List of UFCFormNames instances or single UFCElementNames. @param common_function_space: True if common function space, otherwise False @param add_guards: True iff guards (ifdefs) should be added @param error_control: True iff adaptivity typedefs (ifdefs) should be added """ # Generate dolfin namespace namespace = generate_dolfin_namespace(prefix, forms, common_function_space, error_control) # Collect pieces of code code = [incl.dolfin_tag, header, incl.stl_includes, incl.dolfin_includes, namespace] # Add ifdefs/endifs if specified if add_guards: guard_name = ("%s_h" % prefix).upper() preguard = "#ifndef %s\n#define %s\n" % (guard_name, guard_name) postguard = "\n#endif\n\n" code = [preguard] + code + [postguard] # Return code return "\n".join(code) #------------------------------------------------------------------------------- def generate_dolfin_namespace(prefix, forms, common_function_space=False, error_control=False): # Allow forms to represent a single space, and treat separately if isinstance(forms, UFCElementNames): return generate_single_function_space(prefix, forms) # Extract (common) coefficient spaces assert(parameters["use_common_coefficient_names"]) spaces = extract_coefficient_spaces(forms) # Generate code for common coefficient spaces code = [apply_function_space_template(*space) for space in spaces] # Generate code for forms code += [generate_form(form, "Form_%s" % form.name) for form in forms] # Generate 
namespace typedefs (Bilinear/Linear & Test/Trial/Function) code += [generate_namespace_typedefs(forms, common_function_space, error_control)] # Wrap code in namespace block code = "\nnamespace %s\n{\n\n%s\n}" % (prefix, "\n".join(code)) # Return code return code #------------------------------------------------------------------------------- def generate_single_function_space(prefix, space): code = apply_function_space_template("FunctionSpace", space.ufc_finite_element_classnames[0], space.ufc_dofmap_classnames[0]) code = "\nnamespace %s\n{\n\n%s\n}" % (prefix, code) return code #------------------------------------------------------------------------------- def generate_namespace_typedefs(forms, common_function_space, error_control): # Generate typedefs as (fro, to) pairs of strings pairs = [] # Add typedef for Functional/LinearForm/BilinearForm if only one # is present of each aliases = ["Functional", "LinearForm", "BilinearForm"] extra_aliases = {"LinearForm": "ResidualForm", "BilinearForm": "JacobianForm"} for rank in sorted(range(len(aliases)), reverse=True): forms_of_rank = [form for form in forms if form.rank == rank] if len(forms_of_rank) == 1: pairs += [("Form_%s" % forms_of_rank[0].name, aliases[rank])] if aliases[rank] in extra_aliases: extra_alias = extra_aliases[aliases[rank]] pairs += [("Form_%s" % forms_of_rank[0].name, extra_alias)] # Keepin' it simple: Add typedef for FunctionSpace if term applies if common_function_space: for i, form in enumerate(forms): if form.rank: pairs += [("Form_%s::TestSpace" % form.name, "FunctionSpace")] break # Add specialized typedefs when adding error control wrapppers if error_control: pairs += error_control_pairs(forms) # Combine data to typedef code typedefs = "\n".join("typedef %s %s;" % (to, fro) for (to, fro) in pairs) # Return typedefs or "" if not typedefs: return "" return "// Class typedefs\n" + typedefs + "\n" #------------------------------------------------------------------------------- def 
error_control_pairs(forms): assert (len(forms) == 11), "Expecting 11 error control forms" return [("Form_%s" % forms[8].name, "BilinearForm"), ("Form_%s" % forms[9].name, "LinearForm"), ("Form_%s" % forms[10].name, "GoalFunctional")] ffc-1.6.0/ffc/backends/ufc/000077500000000000000000000000001255571034100153445ustar00rootroot00000000000000ffc-1.6.0/ffc/backends/ufc/__init__.py000066400000000000000000000067501255571034100174650ustar00rootroot00000000000000"""Code generation format strings for UFC (Unified Form-assembly Code) v. 1.6.0 Three format strings are defined for each of the following UFC classes: function finite_element dofmap cell_integral exterior_facet_integral interior_facet_integral form The strings are named '_header', '_implementation', and '_combined'. The header and implementation contain the definition and declaration respectively, and are meant to be placed in .h and .cpp files, while the combined version is for an implementation within a single .h header. Each string has the following format variables: 'classname', 'members', 'constructor', 'destructor', plus one for each interface function with name equal to the function name. 
For more information about UFC and the FEniCS Project, visit http://www.fenicsproject.org https://bitbucket.org/fenics-project/ufc """ # -*- coding: utf-8 -*- __author__ = "Martin Sandve Alnaes, Anders Logg, Kent-Andre Mardal, Ola Skavhaug, and Hans Petter Langtangen" __date__ = "2015-07-28" __version__ = "1.6.0" __license__ = "This code is released into the public domain" from .function import * from .finite_element import * from .dofmap import * from .integrals import * from .form import * from .build import build_ufc_module templates = {"function_header": function_header, "function_implementation": function_implementation, "function_combined": function_combined, "finite_element_header": finite_element_header, "finite_element_implementation": finite_element_implementation, "finite_element_combined": finite_element_combined, "dofmap_header": dofmap_header, "dofmap_implementation": dofmap_implementation, "dofmap_combined": dofmap_combined, "cell_integral_header": cell_integral_header, "cell_integral_implementation": cell_integral_implementation, "cell_integral_combined": cell_integral_combined, "exterior_facet_integral_header": exterior_facet_integral_header, "exterior_facet_integral_implementation": exterior_facet_integral_implementation, "exterior_facet_integral_combined": exterior_facet_integral_combined, "interior_facet_integral_header": interior_facet_integral_header, "interior_facet_integral_implementation": interior_facet_integral_implementation, "interior_facet_integral_combined": interior_facet_integral_combined, "vertex_integral_header": vertex_integral_header, "vertex_integral_implementation": vertex_integral_implementation, "vertex_integral_combined": vertex_integral_combined, "custom_integral_header": custom_integral_header, "custom_integral_implementation": custom_integral_implementation, "custom_integral_combined": custom_integral_combined, "form_header": form_header, "form_implementation": form_implementation, "form_combined": form_combined} 
ffc-1.6.0/ffc/backends/ufc/build.py000066400000000000000000000067501255571034100170250ustar00rootroot00000000000000__author__ = "Johan Hake (hake@simula.no)" __date__ = "2009-03-06 -- 2014-05-20" __license__ = "This code is released into the public domain" __all__ = ['build_ufc_module'] # Modified by Martin Alnes, 2009 import instant import os, sys, re, glob from distutils import sysconfig def build_ufc_module(h_files, source_directory="", system_headers=None, \ **kwargs): """Build a python extension module from ufc compliant source code. The compiled module will be imported and returned by the function. @param h_files: The name(s) of the header files that should be compiled and included in the python extension module. @param source_directory: The directory where the source files reside. @param system_headers: Extra headers that will be #included in the generated wrapper file. Any additional keyword arguments are passed on to instant.build_module. """ # Check h_files argument if isinstance(h_files, str): h_files = [h_files] assert isinstance(h_files, list) , "Provide a 'list' or a 'str' as 'h_files'." assert all(isinstance(f, str) for f in h_files), \ "Elements of 'h_files' must be 'str'." h_files2 = [os.path.join(source_directory, fn) for fn in h_files] for f in h_files2: if not os.path.isfile(f): raise IOError("The file '%s' does not exist." % f) # Check system_headers argument system_headers = system_headers or [] assert isinstance(system_headers, list), "Provide a 'list' as 'system_headers'" assert all(isinstance(header, str) for header in system_headers), \ "Elements of 'system_headers' must be 'str'." 
system_headers.append("memory") # Get the swig interface file declarations declarations = extract_declarations(h_files2) declarations += """ // SWIG version %inline %{ int get_swigversion() { return SWIGVERSION; } %} %pythoncode %{ tmp = hex(get_swigversion()) swigversion = "%d.%d.%d"%(tuple(map(int, [tmp[-5], tmp[-3], tmp[-2:]]))) del tmp, get_swigversion %} """ # Call instant and return module return instant.build_module(wrap_headers = h_files, source_directory = source_directory, additional_declarations = declarations, system_headers = system_headers, cmake_packages = ["UFC"], **kwargs) def extract_declarations(h_files): "Extract information for shared_ptr" # Swig declarations declarations =r""" // Use std::shared_ptr #define SWIG_SHARED_PTR_NAMESPACE std %include // Declare which classes should be stored using shared_ptr %include "swig/ufc_shared_ptr_classes.i" // Import types from ufc %import(module="ufc") "ufc.h" // Swig shared_ptr macro declarations """ for h_file in h_files: # Read the code with open(h_file) as file: code = file.read() # Extract the class names derived_classes = re.findall(r"class[ ]+([\w]+)[ ]*: public", code) ufc_classes = re.findall(r"public[ ]+(ufc::[\w]+).*", code) ufc_proxy_classes = [s.replace("ufc::", "") for s in ufc_classes] new_share_ptr_format = "%%shared_ptr(%s)" # Write shared_ptr code for swig 2.0.0 or higher declarations += "\n".join(new_share_ptr_format%c for c in derived_classes) declarations += "\n" return declarations ffc-1.6.0/ffc/backends/ufc/dofmap.py000066400000000000000000000204271255571034100171710ustar00rootroot00000000000000# Code generation format strings for UFC (Unified Form-assembly Code) v. 1.6.0. # This code is released into the public domain. # # The FEniCS Project (http://www.fenicsproject.org/) 2006-2015. dofmap_combined = """\ /// This class defines the interface for a local-to-global mapping of /// degrees of freedom (dofs). 
class %(classname)s: public ufc::dofmap {%(members)s public: /// Constructor %(classname)s(%(constructor_arguments)s) : ufc::dofmap()%(initializer_list)s { %(constructor)s } /// Destructor virtual ~%(classname)s() { %(destructor)s } /// Return a string identifying the dofmap virtual const char* signature() const { %(signature)s } /// Return true iff mesh entities of topological dimension d are needed virtual bool needs_mesh_entities(std::size_t d) const { %(needs_mesh_entities)s } /// Return the topological dimension of the associated cell shape virtual std::size_t topological_dimension() const { %(topological_dimension)s } /// Return the geometric dimension of the associated cell shape virtual std::size_t geometric_dimension() const { %(geometric_dimension)s } /// Return the dimension of the global finite element function space virtual std::size_t global_dimension(const std::vector& num_global_entities) const { %(global_dimension)s } /// Return the dimension of the local finite element function space for a cell virtual std::size_t num_element_dofs() const { %(num_element_dofs)s } /// Return the number of dofs on each cell facet virtual std::size_t num_facet_dofs() const { %(num_facet_dofs)s } /// Return the number of dofs associated with each cell entity of dimension d virtual std::size_t num_entity_dofs(std::size_t d) const { %(num_entity_dofs)s } /// Tabulate the local-to-global mapping of dofs on a cell virtual void tabulate_dofs(std::size_t* dofs, const std::vector& num_global_entities, const ufc::cell& c) const { %(tabulate_dofs)s } /// Tabulate the local-to-local mapping from facet dofs to cell dofs virtual void tabulate_facet_dofs(std::size_t* dofs, std::size_t facet) const { %(tabulate_facet_dofs)s } /// Tabulate the local-to-local mapping of dofs on entity (d, i) virtual void tabulate_entity_dofs(std::size_t* dofs, std::size_t d, std::size_t i) const { %(tabulate_entity_dofs)s } /// Tabulate the coordinates of all dofs on a cell virtual void 
tabulate_coordinates(double* dof_coordinates, const double* vertex_coordinates) const { %(tabulate_coordinates)s } /// Return the number of sub dofmaps (for a mixed element) virtual std::size_t num_sub_dofmaps() const { %(num_sub_dofmaps)s } /// Create a new dofmap for sub dofmap i (for a mixed element) virtual ufc::dofmap* create_sub_dofmap(std::size_t i) const { %(create_sub_dofmap)s } /// Create a new class instance virtual ufc::dofmap* create() const { %(create)s } }; """ dofmap_header = """\ /// This class defines the interface for a local-to-global mapping of /// degrees of freedom (dofs). class %(classname)s: public ufc::dofmap {%(members)s public: /// Constructor %(classname)s(%(constructor_arguments)s); /// Destructor virtual ~%(classname)s(); /// Return a string identifying the dofmap virtual const char* signature() const; /// Return true iff mesh entities of topological dimension d are needed virtual bool needs_mesh_entities(std::size_t d) const; /// Return the topological dimension of the associated cell shape virtual std::size_t topological_dimension() const; /// Return the geometric dimension of the associated cell shape virtual std::size_t geometric_dimension() const; /// Return the dimension of the global finite element function space virtual std::size_t global_dimension(const std::vector& num_global_entities) const; /// Return the dimension of the local finite element function space for a cell virtual std::size_t num_element_dofs() const; /// Return the number of dofs on each cell facet virtual std::size_t num_facet_dofs() const; /// Return the number of dofs associated with each cell entity of dimension d virtual std::size_t num_entity_dofs(std::size_t d) const; /// Tabulate the local-to-global mapping of dofs on a cell virtual void tabulate_dofs(std::size_t* dofs, const std::vector& num_global_entities, const ufc::cell& c) const; /// Tabulate the local-to-local mapping from facet dofs to cell dofs virtual void tabulate_facet_dofs(std::size_t* 
dofs, std::size_t facet) const; /// Tabulate the local-to-local mapping of dofs on entity (d, i) virtual void tabulate_entity_dofs(std::size_t* dofs, std::size_t d, std::size_t i) const; /// Tabulate the coordinates of all dofs on a cell virtual void tabulate_coordinates(double* coordinates, const double* vertex_coordinates) const; /// Return the number of sub dofmaps (for a mixed element) virtual std::size_t num_sub_dofmaps() const; /// Create a new dofmap for sub dofmap i (for a mixed element) virtual ufc::dofmap* create_sub_dofmap(std::size_t i) const; /// Create a new class instance virtual ufc::dofmap* create() const; }; """ dofmap_implementation = """\ /// Constructor %(classname)s::%(classname)s(%(constructor_arguments)s) : ufc::dofmap()%(initializer_list)s { %(constructor)s } /// Destructor %(classname)s::~%(classname)s() { %(destructor)s } /// Return a string identifying the dofmap const char* %(classname)s::signature() const { %(signature)s } /// Return true iff mesh entities of topological dimension d are needed bool %(classname)s::needs_mesh_entities(std::size_t d) const { %(needs_mesh_entities)s } /// Return the topological dimension of the associated cell shape std::size_t %(classname)s::topological_dimension() const { %(topological_dimension)s } /// Return the geometric dimension of the associated cell shape std::size_t %(classname)s::geometric_dimension() const { %(geometric_dimension)s } /// Return the dimension of the global finite element function space std::size_t %(classname)s::global_dimension(const std::vector& num_global_entities) const { %(global_dimension)s } /// Return the dimension of the local finite element function space for a cell std::size_t %(classname)s::num_element_dofs() const { %(num_element_dofs)s } /// Return the number of dofs on each cell facet std::size_t %(classname)s::num_facet_dofs() const { %(num_facet_dofs)s } /// Return the number of dofs associated with each cell entity of dimension d std::size_t 
%(classname)s::num_entity_dofs(std::size_t d) const { %(num_entity_dofs)s } /// Tabulate the local-to-global mapping of dofs on a cell void %(classname)s::tabulate_dofs(std::size_t* dofs, const std::vector& num_global_entities, const ufc::cell& c) const { %(tabulate_dofs)s } /// Tabulate the local-to-local mapping from facet dofs to cell dofs void %(classname)s::tabulate_facet_dofs(std::size_t* dofs, std::size_t facet) const { %(tabulate_facet_dofs)s } /// Tabulate the local-to-local mapping of dofs on entity (d, i) void %(classname)s::tabulate_entity_dofs(std::size_t* dofs, std::size_t d, std::size_t i) const { %(tabulate_entity_dofs)s } /// Tabulate the coordinates of all dofs on a cell void %(classname)s::tabulate_coordinates(double* dof_coordinates, const double* vertex_coordinates) const { %(tabulate_coordinates)s } /// Return the number of sub dofmaps (for a mixed element) std::size_t %(classname)s::num_sub_dofmaps() const { %(num_sub_dofmaps)s } /// Create a new dofmap for sub dofmap i (for a mixed element) ufc::dofmap* %(classname)s::create_sub_dofmap(std::size_t i) const { %(create_sub_dofmap)s } /// Create a new class instance ufc::dofmap* %(classname)s::create() const { %(create)s } """ ffc-1.6.0/ffc/backends/ufc/finite_element.py000066400000000000000000000442711255571034100207150ustar00rootroot00000000000000# Code generation format strings for UFC (Unified Form-assembly Code) v. 1.6.0. # This code is released into the public domain. # # The FEniCS Project (http://www.fenicsproject.org/) 2006-2015. finite_element_combined = """\ /// This class defines the interface for a finite element. 
class %(classname)s: public ufc::finite_element {%(members)s public: /// Constructor %(classname)s(%(constructor_arguments)s) : ufc::finite_element()%(initializer_list)s { %(constructor)s } /// Destructor virtual ~%(classname)s() { %(destructor)s } /// Return a string identifying the finite element virtual const char* signature() const { %(signature)s } /// Return the cell shape virtual ufc::shape cell_shape() const { %(cell_shape)s } /// Return the topological dimension of the cell shape virtual std::size_t topological_dimension() const { %(topological_dimension)s } /// Return the geometric dimension of the cell shape virtual std::size_t geometric_dimension() const { %(geometric_dimension)s } /// Return the dimension of the finite element function space virtual std::size_t space_dimension() const { %(space_dimension)s } /// Return the rank of the value space virtual std::size_t value_rank() const { %(value_rank)s } /// Return the dimension of the value space for axis i virtual std::size_t value_dimension(std::size_t i) const { %(value_dimension)s } /// Evaluate basis function i at given point x in cell (actual implementation) static void _evaluate_basis(std::size_t i, double* values, const double* x, const double* vertex_coordinates, int cell_orientation) { %(evaluate_basis)s } /// Evaluate basis function i at given point x in cell (non-static member function) virtual void evaluate_basis(std::size_t i, double* values, const double* x, const double* vertex_coordinates, int cell_orientation) const { _evaluate_basis(i, values, x, vertex_coordinates, cell_orientation); } /// Evaluate all basis functions at given point x in cell (actual implementation) static void _evaluate_basis_all(double* values, const double* x, const double* vertex_coordinates, int cell_orientation) { %(evaluate_basis_all)s } /// Evaluate all basis functions at given point x in cell (non-static member function) virtual void evaluate_basis_all(double* values, const double* x, const double* 
vertex_coordinates, int cell_orientation) const { _evaluate_basis_all(values, x, vertex_coordinates, cell_orientation); } /// Evaluate order n derivatives of basis function i at given point x in cell (actual implementation) static void _evaluate_basis_derivatives(std::size_t i, std::size_t n, double* values, const double* x, const double* vertex_coordinates, int cell_orientation) { %(evaluate_basis_derivatives)s } /// Evaluate order n derivatives of basis function i at given point x in cell (non-static member function) virtual void evaluate_basis_derivatives(std::size_t i, std::size_t n, double* values, const double* x, const double* vertex_coordinates, int cell_orientation) const { _evaluate_basis_derivatives(i, n, values, x, vertex_coordinates, cell_orientation); } /// Evaluate order n derivatives of all basis functions at given point x in cell (actual implementation) static void _evaluate_basis_derivatives_all(std::size_t n, double* values, const double* x, const double* vertex_coordinates, int cell_orientation) { %(evaluate_basis_derivatives_all)s } /// Evaluate order n derivatives of all basis functions at given point x in cell (non-static member function) virtual void evaluate_basis_derivatives_all(std::size_t n, double* values, const double* x, const double* vertex_coordinates, int cell_orientation) const { _evaluate_basis_derivatives_all(n, values, x, vertex_coordinates, cell_orientation); } /// Evaluate linear functional for dof i on the function f virtual double evaluate_dof(std::size_t i, const ufc::function& f, const double* vertex_coordinates, int cell_orientation, const ufc::cell& c) const { %(evaluate_dof)s } /// Evaluate linear functionals for all dofs on the function f virtual void evaluate_dofs(double* values, const ufc::function& f, const double* vertex_coordinates, int cell_orientation, const ufc::cell& c) const { %(evaluate_dofs)s } /// Interpolate vertex values from dof values virtual void interpolate_vertex_values(double* vertex_values, const 
double* dof_values, const double* vertex_coordinates, int cell_orientation, const ufc::cell& c) const { %(interpolate_vertex_values)s } /// Map coordinate xhat from reference cell to coordinate x in cell virtual void map_from_reference_cell(double* x, const double* xhat, const ufc::cell& c) const { %(map_from_reference_cell)s } /// Map from coordinate x in cell to coordinate xhat in reference cell virtual void map_to_reference_cell(double* xhat, const double* x, const ufc::cell& c) const { %(map_to_reference_cell)s } /// Return the number of sub elements (for a mixed element) virtual std::size_t num_sub_elements() const { %(num_sub_elements)s } /// Create a new finite element for sub element i (for a mixed element) virtual ufc::finite_element* create_sub_element(std::size_t i) const { %(create_sub_element)s } /// Create a new class instance virtual ufc::finite_element* create() const { %(create)s } }; """ finite_element_header = """\ /// This class defines the interface for a finite element. 
class %(classname)s: public ufc::finite_element {%(members)s public: /// Constructor %(classname)s(%(constructor_arguments)s); /// Destructor virtual ~%(classname)s(); /// Return a string identifying the finite element virtual const char* signature() const; /// Return the cell shape virtual ufc::shape cell_shape() const; /// Return the topological dimension of the cell shape virtual std::size_t topological_dimension() const; /// Return the geometric dimension of the cell shape virtual std::size_t geometric_dimension() const; /// Return the dimension of the finite element function space virtual std::size_t space_dimension() const; /// Return the rank of the value space virtual std::size_t value_rank() const; /// Return the dimension of the value space for axis i virtual std::size_t value_dimension(std::size_t i) const; /// Evaluate basis function i at given point x in cell (actual implementation) static void _evaluate_basis(std::size_t i, double* values, const double* x, const double* vertex_coordinates, int cell_orientation); /// Evaluate basis function i at given point x in cell (non-static member function) virtual void evaluate_basis(std::size_t i, double* values, const double* x, const double* vertex_coordinates, int cell_orientation) const { _evaluate_basis(i, values, x, vertex_coordinates, cell_orientation); } /// Evaluate all basis functions at given point x in cell (actual implementation) static void _evaluate_basis_all(double* values, const double* x, const double* vertex_coordinates, int cell_orientation); /// Evaluate all basis functions at given point x in cell (non-static member function) virtual void evaluate_basis_all(double* values, const double* x, const double* vertex_coordinates, int cell_orientation) const { _evaluate_basis_all(values, x, vertex_coordinates, cell_orientation); } /// Evaluate order n derivatives of basis function i at given point x in cell (actual implementation) static void _evaluate_basis_derivatives(std::size_t i, std::size_t 
n, double* values, const double* x, const double* vertex_coordinates, int cell_orientation); /// Evaluate order n derivatives of basis function i at given point x in cell (non-static member function) virtual void evaluate_basis_derivatives(std::size_t i, std::size_t n, double* values, const double* x, const double* vertex_coordinates, int cell_orientation) const { _evaluate_basis_derivatives(i, n, values, x, vertex_coordinates, cell_orientation); } /// Evaluate order n derivatives of all basis functions at given point x in cell (actual implementation) static void _evaluate_basis_derivatives_all(std::size_t n, double* values, const double* x, const double* vertex_coordinates, int cell_orientation); /// Evaluate order n derivatives of all basis functions at given point x in cell (non-static member function) virtual void evaluate_basis_derivatives_all(std::size_t n, double* values, const double* x, const double* vertex_coordinates, int cell_orientation) const { _evaluate_basis_derivatives_all(n, values, x, vertex_coordinates, cell_orientation); } /// Evaluate linear functional for dof i on the function f virtual double evaluate_dof(std::size_t i, const ufc::function& f, const double* vertex_coordinates, int cell_orientation, const ufc::cell& c) const; /// Evaluate linear functionals for all dofs on the function f virtual void evaluate_dofs(double* values, const ufc::function& f, const double* vertex_coordinates, int cell_orientation, const ufc::cell& c) const; /// Interpolate vertex values from dof values virtual void interpolate_vertex_values(double* vertex_values, const double* dof_values, const double* vertex_coordinates, int cell_orientation, const ufc::cell& c) const; /// Map coordinate xhat from reference cell to coordinate x in cell virtual void map_from_reference_cell(double* x, const double* xhat, const ufc::cell& c) const; /// Map from coordinate x in cell to coordinate xhat in reference cell virtual void map_to_reference_cell(double* xhat, const double* x, 
const ufc::cell& c) const; /// Return the number of sub elements (for a mixed element) virtual std::size_t num_sub_elements() const; /// Create a new finite element for sub element i (for a mixed element) virtual ufc::finite_element* create_sub_element(std::size_t i) const; /// Create a new class instance virtual ufc::finite_element* create() const; }; """ finite_element_implementation= """\ /// Constructor %(classname)s::%(classname)s(%(constructor_arguments)s) : ufc::finite_element()%(initializer_list)s { %(constructor)s } /// Destructor %(classname)s::~%(classname)s() { %(destructor)s } /// Return a string identifying the finite element const char* %(classname)s::signature() const { %(signature)s } /// Return the cell shape ufc::shape %(classname)s::cell_shape() const { %(cell_shape)s } /// Return the topological dimension of the cell shape std::size_t %(classname)s::topological_dimension() const { %(topological_dimension)s } /// Return the geometric dimension of the cell shape std::size_t %(classname)s::geometric_dimension() const { %(geometric_dimension)s } /// Return the dimension of the finite element function space std::size_t %(classname)s::space_dimension() const { %(space_dimension)s } /// Return the rank of the value space std::size_t %(classname)s::value_rank() const { %(value_rank)s } /// Return the dimension of the value space for axis i std::size_t %(classname)s::value_dimension(std::size_t i) const { %(value_dimension)s } /// Evaluate basis function i at given point x in cell void %(classname)s::_evaluate_basis(std::size_t i, double* values, const double* x, const double* vertex_coordinates, int cell_orientation) { %(evaluate_basis)s } /// Evaluate all basis functions at given point x in cell void %(classname)s::_evaluate_basis_all(double* values, const double* x, const double* vertex_coordinates, int cell_orientation) { %(evaluate_basis_all)s } /// Evaluate order n derivatives of basis function i at given point x in cell void 
%(classname)s::_evaluate_basis_derivatives(std::size_t i, std::size_t n, double* values, const double* x, const double* vertex_coordinates, int cell_orientation) { %(evaluate_basis_derivatives)s } /// Evaluate order n derivatives of all basis functions at given point x in cell void %(classname)s::_evaluate_basis_derivatives_all(std::size_t n, double* values, const double* x, const double* vertex_coordinates, int cell_orientation) { %(evaluate_basis_derivatives_all)s } /// Evaluate linear functional for dof i on the function f double %(classname)s::evaluate_dof(std::size_t i, const ufc::function& f, const double* vertex_coordinates, int cell_orientation, const ufc::cell& c) const { %(evaluate_dof)s } /// Evaluate linear functionals for all dofs on the function f void %(classname)s::evaluate_dofs(double* values, const ufc::function& f, const double* vertex_coordinates, int cell_orientation, const ufc::cell& c) const { %(evaluate_dofs)s } /// Interpolate vertex values from dof values void %(classname)s::interpolate_vertex_values(double* vertex_values, const double* dof_values, const double* vertex_coordinates, int cell_orientation, const ufc::cell& c) const { %(interpolate_vertex_values)s } /// Map coordinate xhat from reference cell to coordinate x in cell void %(classname)s::map_from_reference_cell(double* x, const double* xhat, const ufc::cell& c) const { %(map_from_reference_cell)s } /// Map from coordinate x in cell to coordinate xhat in reference cell void %(classname)s::map_to_reference_cell(double* xhat, const double* x, const ufc::cell& c) const { %(map_to_reference_cell)s } /// Return the number of sub elements (for a mixed element) std::size_t %(classname)s::num_sub_elements() const { %(num_sub_elements)s } /// Create a new finite element for sub element i (for a mixed element) ufc::finite_element* %(classname)s::create_sub_element(std::size_t i) const { %(create_sub_element)s } /// Create a new class instance ufc::finite_element* %(classname)s::create() 
const { %(create)s } """ ffc-1.6.0/ffc/backends/ufc/form.py000066400000000000000000000317111255571034100166640ustar00rootroot00000000000000# Code generation format strings for UFC (Unified Form-assembly Code) v. 1.6.0. # This code is released into the public domain. # # The FEniCS Project (http://www.fenicsproject.org/) 2006-2015. form_combined = """\ /// This class defines the interface for the assembly of the global /// tensor corresponding to a form with r + n arguments, that is, a /// mapping /// /// a : V1 x V2 x ... Vr x W1 x W2 x ... x Wn -> R /// /// with arguments v1, v2, ..., vr, w1, w2, ..., wn. The rank r /// global tensor A is defined by /// /// A = a(V1, V2, ..., Vr, w1, w2, ..., wn), /// /// where each argument Vj represents the application to the /// sequence of basis functions of Vj and w1, w2, ..., wn are given /// fixed functions (coefficients). class %(classname)s: public ufc::form {%(members)s public: /// Constructor %(classname)s(%(constructor_arguments)s) : ufc::form()%(initializer_list)s { %(constructor)s } /// Destructor virtual ~%(classname)s() { %(destructor)s } /// Return a string identifying the form virtual const char* signature() const { %(signature)s } /// Return the rank of the global tensor (r) virtual std::size_t rank() const { %(rank)s } /// Return the number of coefficients (n) virtual std::size_t num_coefficients() const { %(num_coefficients)s } /// Return original coefficient position for each coefficient (0 <= i < n) virtual std::size_t original_coefficient_position(std::size_t i) const { %(original_coefficient_position)s } /// Create a new finite element for argument function i virtual ufc::finite_element* create_finite_element(std::size_t i) const { %(create_finite_element)s } /// Create a new dofmap for argument function i virtual ufc::dofmap* create_dofmap(std::size_t i) const { %(create_dofmap)s } /// Return the number of cell domains virtual std::size_t max_cell_subdomain_id() const { %(max_cell_subdomain_id)s } /// 
Return the number of exterior facet domains virtual std::size_t max_exterior_facet_subdomain_id() const { %(max_exterior_facet_subdomain_id)s } /// Return the number of interior facet domains virtual std::size_t max_interior_facet_subdomain_id() const { %(max_interior_facet_subdomain_id)s } /// Return the number of vertex domains virtual std::size_t max_vertex_subdomain_id() const { %(max_vertex_subdomain_id)s } /// Return the number of custom domains virtual std::size_t max_custom_subdomain_id() const { %(max_custom_subdomain_id)s } /// Return whether the form has any cell integrals virtual bool has_cell_integrals() const { %(has_cell_integrals)s } /// Return whether the form has any exterior facet integrals virtual bool has_exterior_facet_integrals() const { %(has_exterior_facet_integrals)s } /// Return whether the form has any interior facet integrals virtual bool has_interior_facet_integrals() const { %(has_interior_facet_integrals)s } /// Return whether the form has any vertex integrals virtual bool has_vertex_integrals() const { %(has_vertex_integrals)s } /// Return whether the form has any custom integrals virtual bool has_custom_integrals() const { %(has_custom_integrals)s } /// Create a new cell integral on sub domain subdomain_id virtual ufc::cell_integral* create_cell_integral(std::size_t subdomain_id) const { %(create_cell_integral)s } /// Create a new exterior facet integral on sub domain subdomain_id virtual ufc::exterior_facet_integral* create_exterior_facet_integral(std::size_t subdomain_id) const { %(create_exterior_facet_integral)s } /// Create a new interior facet integral on sub domain subdomain_id virtual ufc::interior_facet_integral* create_interior_facet_integral(std::size_t subdomain_id) const { %(create_interior_facet_integral)s } /// Create a new vertex integral on sub domain subdomain_id virtual ufc::vertex_integral* create_vertex_integral(std::size_t subdomain_id) const { %(create_vertex_integral)s } /// Create a new custom integral on 
sub domain subdomain_id virtual ufc::custom_integral* create_custom_integral(std::size_t subdomain_id) const { %(create_custom_integral)s } /// Create a new cell integral on everywhere else virtual ufc::cell_integral* create_default_cell_integral() const { %(create_default_cell_integral)s } /// Create a new exterior facet integral on everywhere else virtual ufc::exterior_facet_integral* create_default_exterior_facet_integral() const { %(create_default_exterior_facet_integral)s } /// Create a new interior facet integral on everywhere else virtual ufc::interior_facet_integral* create_default_interior_facet_integral() const { %(create_default_interior_facet_integral)s } /// Create a new vertex integral on everywhere else virtual ufc::vertex_integral* create_default_vertex_integral() const { %(create_default_vertex_integral)s } /// Create a new custom integral on everywhere else virtual ufc::custom_integral* create_default_custom_integral() const { %(create_default_custom_integral)s } }; """ form_header = """\ /// This class defines the interface for the assembly of the global /// tensor corresponding to a form with r + n arguments, that is, a /// mapping /// /// a : V1 x V2 x ... Vr x W1 x W2 x ... x Wn -> R /// /// with arguments v1, v2, ..., vr, w1, w2, ..., wn. The rank r /// global tensor A is defined by /// /// A = a(V1, V2, ..., Vr, w1, w2, ..., wn), /// /// where each argument Vj represents the application to the /// sequence of basis functions of Vj and w1, w2, ..., wn are given /// fixed functions (coefficients). 
class %(classname)s: public ufc::form {%(members)s public: /// Constructor %(classname)s(%(constructor_arguments)s); /// Destructor virtual ~%(classname)s(); /// Return a string identifying the form virtual const char* signature() const; /// Return the rank of the global tensor (r) virtual std::size_t rank() const; /// Return the number of coefficients (n) virtual std::size_t num_coefficients() const; /// Return original coefficient position for each coefficient (0 <= i < n) virtual std::size_t original_coefficient_position(std::size_t i) const; /// Create a new finite element for argument function i virtual ufc::finite_element* create_finite_element(std::size_t i) const; /// Create a new dofmap for argument function i virtual ufc::dofmap* create_dofmap(std::size_t i) const; /// Return the number of cell domains virtual std::size_t max_cell_subdomain_id() const; /// Return the number of exterior facet domains virtual std::size_t max_exterior_facet_subdomain_id() const; /// Return the number of interior facet domains virtual std::size_t max_interior_facet_subdomain_id() const; /// Return the number of vertex domains virtual std::size_t max_vertex_subdomain_id() const; /// Return the number of custom domains virtual std::size_t max_custom_subdomain_id() const; /// Return whether the form has any cell integrals virtual bool has_cell_integrals() const; /// Return whether the form has any exterior facet integrals virtual bool has_exterior_facet_integrals() const; /// Return whether the form has any interior facet integrals virtual bool has_interior_facet_integrals() const; /// Return whether the form has any vertex integrals virtual bool has_vertex_integrals() const; /// Return whether the form has any custom integrals virtual bool has_custom_integrals() const; /// Create a new cell integral on sub domain i virtual ufc::cell_integral* create_cell_integral(std::size_t i) const; /// Create a new exterior facet integral on sub domain i virtual ufc::exterior_facet_integral* 
create_exterior_facet_integral(std::size_t i) const; /// Create a new interior facet integral on sub domain i virtual ufc::interior_facet_integral* create_interior_facet_integral(std::size_t i) const; /// Create a new vertex integral on sub domain i virtual ufc::vertex_integral* create_vertex_integral(std::size_t i) const; /// Create a new custom integral on sub domain i virtual ufc::custom_integral* create_custom_integral(std::size_t i) const; /// Create a new cell integral on everywhere else virtual ufc::cell_integral* create_default_cell_integral() const; /// Create a new exterior facet integral on everywhere else virtual ufc::exterior_facet_integral* create_default_exterior_facet_integral() const; /// Create a new interior facet integral on everywhere else virtual ufc::interior_facet_integral* create_default_interior_facet_integral() const; /// Create a new vertex integral on everywhere else virtual ufc::vertex_integral* create_default_vertex_integral() const; /// Create a new custom integral on everywhere else virtual ufc::custom_integral* create_default_custom_integral() const; }; """ form_implementation = """\ /// Constructor %(classname)s::%(classname)s(%(constructor_arguments)s) : ufc::form()%(initializer_list)s { %(constructor)s } /// Destructor %(classname)s::~%(classname)s() { %(destructor)s } /// Return a string identifying the form const char* %(classname)s::signature() const { %(signature)s } /// Return the rank of the global tensor (r) std::size_t %(classname)s::rank() const { %(rank)s } /// Return the number of coefficients (n) std::size_t %(classname)s::num_coefficients() const { %(num_coefficients)s } /// Return original coefficient position for each coefficient (0 <= i < n) std::size_t %(classname)s::original_coefficient_position(std::size_t i) const { %(original_coefficient_position)s } /// Create a new finite element for argument function i ufc::finite_element* %(classname)s::create_finite_element(std::size_t i) const { 
%(create_finite_element)s } /// Create a new dofmap for argument function i ufc::dofmap* %(classname)s::create_dofmap(std::size_t i) const { %(create_dofmap)s } /// Return the number of cell domains std::size_t %(classname)s::max_cell_subdomain_id() const { %(max_cell_subdomain_id)s } /// Return the number of exterior facet domains std::size_t %(classname)s::max_exterior_facet_subdomain_id() const { %(max_exterior_facet_subdomain_id)s } /// Return the number of interior facet domains std::size_t %(classname)s::max_interior_facet_subdomain_id() const { %(max_interior_facet_subdomain_id)s } /// Return the number of vertex domains std::size_t %(classname)s::max_vertex_subdomain_id() const { %(max_vertex_subdomain_id)s } /// Return the number of custom domains std::size_t %(classname)s::max_custom_subdomain_id() const { %(max_custom_subdomain_id)s } /// Return whether the form has any cell integrals bool %(classname)s::has_cell_integrals() const { %(has_cell_integrals)s } /// Return whether the form has any exterior facet integrals bool %(classname)s::has_exterior_facet_integrals() const { %(has_exterior_facet_integrals)s } /// Return whether the form has any interior facet integrals bool %(classname)s::has_interior_facet_integrals() const { %(has_interior_facet_integrals)s } /// Return whether the form has any vertex integrals bool %(classname)s::has_vertex_integrals() const { %(has_vertex_integrals)s } /// Return whether the form has any custom integrals bool %(classname)s::has_custom_integrals() const { %(has_custom_integrals)s } /// Create a new cell integral on sub domain subdomain_id ufc::cell_integral* %(classname)s::create_cell_integral(std::size_t subdomain_id) const { %(create_cell_integral)s } /// Create a new exterior facet integral on sub domain subdomain_id ufc::exterior_facet_integral* %(classname)s::create_exterior_facet_integral(std::size_t subdomain_id) const { %(create_exterior_facet_integral)s } /// Create a new interior facet integral on sub domain 
subdomain_id ufc::interior_facet_integral* %(classname)s::create_interior_facet_integral(std::size_t subdomain_id) const { %(create_interior_facet_integral)s } /// Create a new vertex integral on sub domain subdomain_id ufc::vertex_integral* %(classname)s::create_vertex_integral(std::size_t subdomain_id) const { %(create_vertex_integral)s } /// Create a new custom integral on sub domain subdomain_id ufc::custom_integral* %(classname)s::create_custom_integral(std::size_t subdomain_id) const { %(create_custom_integral)s } /// Create a new cell integral on everywhere else ufc::cell_integral* %(classname)s::create_default_cell_integral() const { %(create_default_cell_integral)s } /// Create a new exterior facet integral on everywhere else ufc::exterior_facet_integral* %(classname)s::create_default_exterior_facet_integral() const { %(create_default_exterior_facet_integral)s } /// Create a new interior facet integral on everywhere else ufc::interior_facet_integral* %(classname)s::create_default_interior_facet_integral() const { %(create_default_interior_facet_integral)s } /// Create a new vertex integral on everywhere else ufc::vertex_integral* %(classname)s::create_default_vertex_integral() const { %(create_default_vertex_integral)s } /// Create a new custom integral on everywhere else ufc::custom_integral* %(classname)s::create_default_custom_integral() const { %(create_default_custom_integral)s } """ ffc-1.6.0/ffc/backends/ufc/function.py000066400000000000000000000032511255571034100175440ustar00rootroot00000000000000# Code generation format strings for UFC (Unified Form-assembly Code) v. 1.6.0. # This code is released into the public domain. # # The FEniCS Project (http://www.fenicsproject.org/) 2006-2015 function_combined = """\ /// This class defines the interface for a general tensor-valued function. 
class %(classname)s: public ufc::function {%(members)s public: /// Constructor %(classname)s::%(classname)s(%(constructor_arguments)s) : ufc::function()%(initializer_list)s { %(constructor)s } /// Destructor virtual ~%(classname)s() { %(destructor)s } /// Evaluate function at given point in cell virtual void evaluate(double* values, const double* coordinates, const ufc::cell& c) const { %(evaluate)s } }; """ function_header = """\ /// This class defines the interface for a general tensor-valued function. class %(classname)s: public ufc::function {%(members)s public: /// Constructor %(classname)s(%(constructor_arguments)s); /// Destructor virtual ~%(classname)s(); /// Evaluate function at given point in cell virtual void evaluate(double* values, const double* coordinates, const ufc::cell& c) const; }; """ function_implementation = """\ /// Constructor %(classname)s::%(classname)s(%(constructor_arguments)s) : ufc::function()%(initializer_list)s { %(constructor)s } /// Destructor %(classname)s::~%(classname)s() { %(destructor)s } /// Evaluate function at given point in cell void %(classname)s::evaluate(double* values, const double* coordinates, const ufc::cell& c) const { %(evaluate)s } """ ffc-1.6.0/ffc/backends/ufc/integrals.py000066400000000000000000000365121255571034100177150ustar00rootroot00000000000000# Code generation format strings for UFC (Unified Form-assembly Code) v. 1.6.0. # This code is released into the public domain. # # The FEniCS Project (http://www.fenicsproject.org/) 2006-2015 cell_integral_combined = """\ /// This class defines the interface for the tabulation of the cell /// tensor corresponding to the local contribution to a form from /// the integral over a cell. 
class %(classname)s: public ufc::cell_integral {%(members)s public: /// Constructor %(classname)s(%(constructor_arguments)s) : ufc::cell_integral()%(initializer_list)s { %(constructor)s } /// Destructor virtual ~%(classname)s() { %(destructor)s } /// Tabulate which form coefficients are used by this integral virtual const std::vector & enabled_coefficients() const { %(enabled_coefficients)s } /// Tabulate the tensor for the contribution from a local cell virtual void tabulate_tensor(double* %(restrict)s A, const double * const * %(restrict)s w, const double* %(restrict)s vertex_coordinates, int cell_orientation) const { %(tabulate_tensor)s } }; """ cell_integral_header = """\ /// This class defines the interface for the tabulation of the cell /// tensor corresponding to the local contribution to a form from /// the integral over a cell. class %(classname)s: public ufc::cell_integral {%(members)s public: /// Constructor %(classname)s(%(constructor_arguments)s); /// Destructor virtual ~%(classname)s(); /// Tabulate which form coefficients are used by this integral virtual const std::vector & enabled_coefficients() const; /// Tabulate the tensor for the contribution from a local cell virtual void tabulate_tensor(double* %(restrict)s A, const double * const * %(restrict)s w, const double* %(restrict)s vertex_coordinates, int cell_orientation) const; }; """ cell_integral_implementation = """\ /// Constructor %(classname)s::%(classname)s(%(constructor_arguments)s) : ufc::cell_integral()%(initializer_list)s { %(constructor)s } /// Destructor %(classname)s::~%(classname)s() { %(destructor)s } /// Tabulate which form coefficients are used by this integral const std::vector & %(classname)s::enabled_coefficients() const { %(enabled_coefficients)s } /// Tabulate the tensor for the contribution from a local cell void %(classname)s::tabulate_tensor(double* %(restrict)s A, const double * const * %(restrict)s w, const double* %(restrict)s vertex_coordinates, int cell_orientation) 
const { %(tabulate_tensor)s } """ exterior_facet_integral_combined = """\ /// This class defines the interface for the tabulation of the /// exterior facet tensor corresponding to the local contribution to /// a form from the integral over an exterior facet. class %(classname)s: public ufc::exterior_facet_integral {%(members)s public: /// Constructor %(classname)s(%(constructor_arguments)s) : ufc::exterior_facet_integral()%(initializer_list)s { %(constructor)s } /// Destructor virtual ~%(classname)s() { %(destructor)s } /// Tabulate which form coefficients are used by this integral virtual const std::vector & enabled_coefficients() const { %(enabled_coefficients)s } /// Tabulate the tensor for the contribution from a local exterior facet virtual void tabulate_tensor(double* %(restrict)s A, const double * const * %(restrict)s w, const double* %(restrict)s vertex_coordinates, std::size_t facet, int cell_orientation) const { %(tabulate_tensor)s } }; """ exterior_facet_integral_header = """\ /// This class defines the interface for the tabulation of the /// exterior facet tensor corresponding to the local contribution to /// a form from the integral over an exterior facet. 
class %(classname)s: public ufc::exterior_facet_integral {%(members)s public: /// Constructor %(classname)s(%(constructor_arguments)s); /// Destructor virtual ~%(classname)s(); /// Tabulate which form coefficients are used by this integral virtual const std::vector & enabled_coefficients() const; /// Tabulate the tensor for the contribution from a local exterior facet virtual void tabulate_tensor(double* %(restrict)s A, const double * const * %(restrict)s w, const double* %(restrict)s vertex_coordinates, std::size_t facet, int cell_orientation) const; }; """ exterior_facet_integral_implementation = """\ /// Constructor %(classname)s::%(classname)s(%(constructor_arguments)s) : ufc::exterior_facet_integral()%(initializer_list)s { %(constructor)s } /// Destructor %(classname)s::~%(classname)s() { %(destructor)s } /// Tabulate which form coefficients are used by this integral const std::vector & %(classname)s::enabled_coefficients() const { %(enabled_coefficients)s } /// Tabulate the tensor for the contribution from a local exterior facet void %(classname)s::tabulate_tensor(double* %(restrict)s A, const double * const * %(restrict)s w, const double* %(restrict)s vertex_coordinates, std::size_t facet, int cell_orientation) const { %(tabulate_tensor)s } """ interior_facet_integral_combined = """\ /// This class defines the interface for the tabulation of the /// interior facet tensor corresponding to the local contribution to /// a form from the integral over an interior facet. 
class %(classname)s: public ufc::interior_facet_integral {%(members)s public: /// Constructor %(classname)s(%(constructor_arguments)s) : ufc::interior_facet_integral()%(initializer_list)s { %(constructor)s } /// Destructor virtual ~%(classname)s() { %(destructor)s } /// Tabulate which form coefficients are used by this integral virtual const std::vector & enabled_coefficients() const { %(enabled_coefficients)s } /// Tabulate the tensor for the contribution from a local interior facet virtual void tabulate_tensor(double* %(restrict)s A, const double * const * %(restrict)s w, const double* %(restrict)s vertex_coordinates_0, const double* %(restrict)s vertex_coordinates_1, std::size_t facet_0, std::size_t facet_1, int cell_orientation_0, int cell_orientation_1) const { %(tabulate_tensor)s } }; """ interior_facet_integral_header = """\ /// This class defines the interface for the tabulation of the /// interior facet tensor corresponding to the local contribution to /// a form from the integral over an interior facet. 
class %(classname)s: public ufc::interior_facet_integral {%(members)s public: /// Constructor %(classname)s(%(constructor_arguments)s); /// Destructor virtual ~%(classname)s(); /// Tabulate which form coefficients are used by this integral virtual const std::vector & enabled_coefficients() const; /// Tabulate the tensor for the contribution from a local interior facet virtual void tabulate_tensor(double* %(restrict)s A, const double * const * %(restrict)s w, const double* %(restrict)s vertex_coordinates_0, const double* %(restrict)s vertex_coordinates_1, std::size_t facet_0, std::size_t facet_1, int cell_orientation_0, int cell_orientation_1) const; }; """ interior_facet_integral_implementation = """\ /// Constructor %(classname)s::%(classname)s(%(constructor_arguments)s) : ufc::interior_facet_integral()%(initializer_list)s { %(constructor)s } /// Destructor %(classname)s::~%(classname)s() { %(destructor)s } /// Tabulate which form coefficients are used by this integral const std::vector & %(classname)s::enabled_coefficients() const { %(enabled_coefficients)s } /// Tabulate the tensor for the contribution from a local interior facet void %(classname)s::tabulate_tensor(double* %(restrict)s A, const double * const * %(restrict)s w, const double* %(restrict)s vertex_coordinates_0, const double* %(restrict)s vertex_coordinates_1, std::size_t facet_0, std::size_t facet_1, int cell_orientation_0, int cell_orientation_1) const { %(tabulate_tensor)s } """ vertex_integral_combined = """\ /// This class defines the interface for the tabulation of /// an expression evaluated at exactly one vertex. 
class %(classname)s: public ufc::vertex_integral {%(members)s public: /// Constructor %(classname)s(%(constructor_arguments)s) : ufc::vertex_integral()%(initializer_list)s { %(constructor)s } /// Destructor virtual ~%(classname)s() { %(destructor)s } /// Tabulate which form coefficients are used by this integral virtual const std::vector & enabled_coefficients() const { %(enabled_coefficients)s } /// Tabulate the tensor for the contribution from the local vertex virtual void tabulate_tensor(double* %(restrict)s A, const double * const * %(restrict)s w, const double* %(restrict)s vertex_coordinates, std::size_t vertex, int cell_orientation) const { %(tabulate_tensor)s } }; """ vertex_integral_header = """\ /// This class defines the interface for the tabulation of /// an expression evaluated at exactly one vertex. class %(classname)s: public ufc::vertex_integral {%(members)s public: /// Constructor %(classname)s(%(constructor_arguments)s); /// Destructor virtual ~%(classname)s(); /// Tabulate which form coefficients are used by this integral virtual const std::vector & enabled_coefficients() const; /// Tabulate the tensor for the contribution from the local vertex virtual void tabulate_tensor(double* %(restrict)s A, const double * const * %(restrict)s w, const double* %(restrict)s vertex_coordinates, std::size_t vertex, int cell_orientation) const; }; """ vertex_integral_implementation = """\ /// Constructor %(classname)s::%(classname)s(%(constructor_arguments)s) : ufc::vertex_integral()%(initializer_list)s { %(constructor)s } /// Destructor %(classname)s::~%(classname)s() { %(destructor)s } /// Tabulate which form coefficients are used by this integral const std::vector & %(classname)s::enabled_coefficients() const { %(enabled_coefficients)s } /// Tabulate the tensor for the contribution from the local vertex void %(classname)s::tabulate_tensor(double* %(restrict)s A, const double * const * %(restrict)s w, const double* %(restrict)s vertex_coordinates, std::size_t 
vertex, int cell_orientation) const { %(tabulate_tensor)s } """ custom_integral_combined = """\ /// This class defines the interface for the tabulation of the /// tensor corresponding to the local contribution to a form from /// the integral over a custom domain defined in terms of a set of /// quadrature points and weights. class %(classname)s: public ufc::custom_integral {%(members)s public: /// Constructor %(classname)s(%(constructor_arguments)s) : ufc::custom_integral()%(initializer_list)s { %(constructor)s } /// Destructor virtual ~%(classname)s() { %(destructor)s } /// Tabulate which form coefficients are used by this integral virtual const std::vector & enabled_coefficients() const { %(enabled_coefficients)s } /// Return the number of cells involved in evaluation of the integral virtual std::size_t num_cells() const { %(num_cells)s } /// Tabulate the tensor for the contribution from a custom domain virtual void tabulate_tensor(double* A, const double * const * w, const double* vertex_coordinates, std::size_t num_quadrature_points, const double* quadrature_points, const double* quadrature_weights, const double* facet_normals, int cell_orientation) const { %(tabulate_tensor)s } }; """ custom_integral_header = """\ /// This class defines the interface for the tabulation of the /// tensor corresponding to the local contribution to a form from /// the integral over a custom domain defined in terms of a set of /// quadrature points and weights. 
class %(classname)s: public ufc::custom_integral {%(members)s public: /// Constructor %(classname)s(%(constructor_arguments)s); /// Destructor virtual ~%(classname)s(); /// Tabulate which form coefficients are used by this integral virtual const std::vector & enabled_coefficients() const; /// Return the number of cells involved in evaluation of the integral virtual std::size_t num_cells() const; /// Tabulate the tensor for the contribution from a custom domain virtual void tabulate_tensor(double* A, const double * const * w, const double* vertex_coordinates, std::size_t num_quadrature_points, const double* quadrature_points, const double* quadrature_weights, const double* facet_normals, int cell_orientation) const; }; """ custom_integral_implementation = """\ /// Constructor %(classname)s::%(classname)s(%(constructor_arguments)s) : ufc::custom_integral()%(initializer_list)s { %(constructor)s } /// Destructor %(classname)s::~%(classname)s() { %(destructor)s } /// Tabulate which form coefficients are used by this integral const std::vector & %(classname)s::enabled_coefficients() const { %(enabled_coefficients)s } /// Return the number of cells involved in evaluation of the integral std::size_t %(classname)s::num_cells() const { %(num_cells)s } /// Tabulate the tensor for the contribution from a custom domain void %(classname)s::tabulate_tensor(double* A, const double * const * w, const double* vertex_coordinates, std::size_t num_quadrature_points, const double* quadrature_points, const double* quadrature_weights, const double* facet_normals, int cell_orientation) const { %(tabulate_tensor)s } """ ffc-1.6.0/ffc/codegeneration.py000066400000000000000000000457651255571034100163760ustar00rootroot00000000000000""" Compiler stage 4: Code generation --------------------------------- This module implements the generation of C++ code for the body of each UFC function from an (optimized) intermediate representation (OIR). 
""" # Copyright (C) 2009-2015 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Mehdi Nikbakht 2010 # Modified by Martin Alnaes, 2013-2015 # FFC modules from ffc.log import info, begin, end, debug_code from ffc.cpp import format, indent from ffc.cpp import set_exception_handling # FFC code generation modules from ffc.evaluatebasis import _evaluate_basis, _evaluate_basis_all from ffc.evaluatebasisderivatives import _evaluate_basis_derivatives from ffc.evaluatebasisderivatives import _evaluate_basis_derivatives_all from ffc.evaluatedof import evaluate_dof_and_dofs, affine_weights from ffc.interpolatevertexvalues import interpolate_vertex_values from ffc.representation import pick_representation, ufc_integral_types # Errors issued for non-implemented functions def _not_implemented(function_name, return_null=False): body = format["exception"]("%s not yet implemented." % function_name) if return_null: body += "\n" + format["return"](0) return body def generate_code(ir, prefix, parameters): "Generate code from intermediate representation." begin("Compiler stage 4: Generating code") # FIXME: Document option -fconvert_exceptions_to_warnings # FIXME: Remove option epsilon and just rely on precision? 
# Set code generation parameters # set_float_formatting(int(parameters["precision"])) set_exception_handling(parameters["convert_exceptions_to_warnings"]) # Extract representations ir_elements, ir_dofmaps, ir_integrals, ir_forms = ir # Generate code for elements info("Generating code for %d element(s)" % len(ir_elements)) code_elements = [_generate_element_code(ir, prefix, parameters) for ir in ir_elements] # Generate code for dofmaps info("Generating code for %d dofmap(s)" % len(ir_dofmaps)) code_dofmaps = [_generate_dofmap_code(ir, prefix, parameters) for ir in ir_dofmaps] # Generate code for integrals info("Generating code for integrals") code_integrals = [_generate_integral_code(ir, prefix, parameters) for ir in ir_integrals] # Generate code for forms info("Generating code for forms") code_forms = [_generate_form_code(ir, prefix, parameters) for ir in ir_forms] end() return code_elements, code_dofmaps, code_integrals, code_forms def _generate_element_code(ir, prefix, parameters): "Generate code for finite element from intermediate representation." 
# Skip code generation if ir is None if ir is None: return None # Prefetch formatting to speedup code generation ret = format["return"] classname = format["classname finite_element"] do_nothing = format["do nothing"] create = format["create foo"] # Codes generated together (evaluate_dof_code, evaluate_dofs_code) \ = evaluate_dof_and_dofs(ir["evaluate_dof"]) # Generate code code = {} code["classname"] = classname(prefix, ir["id"]) code["members"] = "" code["constructor"] = do_nothing code["constructor_arguments"] = "" code["initializer_list"] = "" code["destructor"] = do_nothing code["signature"] = ret('"%s"' % ir["signature"]) code["cell_shape"] = ret(format["cell"](ir["cell_shape"])) code["topological_dimension"] = ret(ir["topological_dimension"]) code["geometric_dimension"] = ret(ir["geometric_dimension"]) code["space_dimension"] = ret(ir["space_dimension"]) code["value_rank"] = ret(ir["value_rank"]) code["value_dimension"] = _value_dimension(ir["value_dimension"]) code["evaluate_basis"] = _evaluate_basis(ir["evaluate_basis"]) code["evaluate_basis_all"] = _evaluate_basis_all(ir["evaluate_basis"]) code["evaluate_basis_derivatives"] \ = _evaluate_basis_derivatives(ir["evaluate_basis"]) code["evaluate_basis_derivatives_all"] \ = _evaluate_basis_derivatives_all(ir["evaluate_basis"]) code["evaluate_dof"] = evaluate_dof_code code["evaluate_dofs"] = evaluate_dofs_code code["interpolate_vertex_values"] \ = interpolate_vertex_values(ir["interpolate_vertex_values"]) code["map_from_reference_cell"] \ = _not_implemented("map_from_reference_cell") code["map_to_reference_cell"] = _not_implemented("map_to_reference_cell") code["num_sub_elements"] = ret(ir["num_sub_elements"]) code["create_sub_element"] = _create_sub_element(prefix, ir) code["create"] = ret(create(code["classname"])) # Postprocess code _postprocess_code(code, parameters) return code def _generate_dofmap_code(ir, prefix, parameters): "Generate code for dofmap from intermediate representation." 
# Skip code generation if ir is None if ir is None: return None # Prefetch formatting to speedup code generation ret = format["return"] classname = format["classname dofmap"] declare = format["declaration"] assign = format["assign"] do_nothing = format["do nothing"] switch = format["switch"] f_int = format["int"] f_d = format["argument dimension"] create = format["create foo"] # Generate code code = {} code["classname"] = classname(prefix, ir["id"]) code["members"] = "" code["constructor"] = do_nothing code["constructor_arguments"] = "" code["initializer_list"] = "" code["destructor"] = do_nothing code["signature"] = ret('"%s"' % ir["signature"]) code["needs_mesh_entities"] \ = _needs_mesh_entities(ir["needs_mesh_entities"]) code["topological_dimension"] = ret(ir["topological_dimension"]) code["geometric_dimension"] = ret(ir["geometric_dimension"]) code["global_dimension"] = _global_dimension(ir["global_dimension"]) code["num_element_dofs"] = ret(ir["num_element_dofs"]) code["num_facet_dofs"] = ret(ir["num_facet_dofs"]) code["num_entity_dofs"] \ = switch(f_d, [ret(num) for num in ir["num_entity_dofs"]], ret(f_int(0))) code["tabulate_dofs"] = _tabulate_dofs(ir["tabulate_dofs"]) code["tabulate_facet_dofs"] \ = _tabulate_facet_dofs(ir["tabulate_facet_dofs"]) code["tabulate_entity_dofs"] \ = _tabulate_entity_dofs(ir["tabulate_entity_dofs"]) code["tabulate_coordinates"] \ = _tabulate_coordinates(ir["tabulate_coordinates"]) code["num_sub_dofmaps"] = ret(ir["num_sub_dofmaps"]) code["create_sub_dofmap"] = _create_sub_dofmap(prefix, ir) code["create"] = ret(create(code["classname"])) # Postprocess code _postprocess_code(code, parameters) return code def _generate_integral_code(ir, prefix, parameters): "Generate code for integrals from intermediate representation." 
# Skip code generation if ir is None if ir is None: return None # Select representation r = pick_representation(ir["representation"]) # Generate code code = r.generate_integral_code(ir, prefix, parameters) # Indent code (unused variables should already be removed) # FIXME: Remove this quick hack if ir["representation"] != "uflacs": _indent_code(code) return code def _generate_original_coefficient_position(original_coefficient_positions): # TODO: I don't know how to implement this using the format dict, # this will do for now: initializer_list = ", ".join(str(i) for i in original_coefficient_positions) code = '\n'.join([ "static const std::vector position({%s});" % initializer_list, "return position[i];", ]) return code def _generate_form_code(ir, prefix, parameters): "Generate code for form from intermediate representation." # Skip code generation if ir is None if ir is None: return None # Prefetch formatting to speedup code generation ret = format["return"] classname = format["classname form"] do_nothing = format["do nothing"] # Generate code code = {} code["classname"] = classname(prefix, ir["id"]) code["members"] = "" code["constructor"] = do_nothing code["constructor_arguments"] = "" code["initializer_list"] = "" code["destructor"] = do_nothing code["signature"] = ret('"%s"' % ir["signature"]) code["original_coefficient_position"] = _generate_original_coefficient_position(ir["original_coefficient_positions"]) code["rank"] = ret(ir["rank"]) code["num_coefficients"] = ret(ir["num_coefficients"]) code["create_finite_element"] = _create_finite_element(prefix, ir) code["create_dofmap"] = _create_dofmap(prefix, ir) for integral_type in ufc_integral_types: code["max_%s_subdomain_id" % integral_type] = ret(ir["max_%s_subdomain_id" % integral_type]) code["has_%s_integrals" % integral_type] = _has_foo_integrals(ir, integral_type) code["create_%s_integral" % integral_type] = _create_foo_integral(ir, integral_type, prefix) code["create_default_%s_integral" % integral_type] 
= _create_default_foo_integral(ir, integral_type, prefix) # Postprocess code _postprocess_code(code, parameters) return code #--- Code generation for non-trivial functions --- def _value_dimension(ir): "Generate code for value_dimension." ret = format["return"] axis = format["argument axis"] f_int = format["int"] if ir == (): return ret(1) return format["switch"](axis, [ret(n) for n in ir], ret(f_int(0))) def _needs_mesh_entities(ir): """ Generate code for needs_mesh_entities. ir is a list of num dofs per entity. """ ret = format["return"] boolean = format["bool"] dimension = format["argument dimension"] return format["switch"](dimension, [ret(boolean(c)) for c in ir], ret(boolean(False))) def _global_dimension(ir): """Generate code for global_dimension. ir[0] is a list of num dofs per entity.""" num_dofs = ir[0] component = format["component"] entities = format["num entities"] dimension = format["inner product"]([format["int"](d) for d in num_dofs], [component(entities, d) for d in range(len(num_dofs))]) # Handle global "elements" if any if ir[1]: dimension = format["add"]([dimension, format["int"](ir[1])]) try: dimension = format["int"](eval(dimension)) except: pass code = format["return"](dimension) return code def _tabulate_facet_dofs(ir): "Generate code for tabulate_facet_dofs." assign = format["assign"] component = format["component"] dofs = format["argument dofs"] cases = ["\n".join(assign(component(dofs, i), dof) for (i, dof) in enumerate(facet)) for facet in ir] return format["switch"](format["facet"](None), cases) def _tabulate_dofs(ir): "Generate code for tabulate_dofs." 
# Prefetch formats add = format["addition"] iadd = format["iadd"] multiply = format["multiply"] assign = format["assign"] component = format["component"] entity_index = format["entity index"] num_entities_format = format["num entities"] unsigned_int = format["uint declaration"] dofs_variable = format["argument dofs"] if ir is None: return assign(component(dofs_variable, 0), 0) # Extract representation (dofs_per_element, num_dofs_per_element, num_entities, need_offset, fakes) = ir # Declare offset if needed code = [] offset_name = "0" if need_offset: offset_name = "offset" code.append(format["declaration"](unsigned_int, offset_name, 0)) # Generate code for each element i = 0 for (no, num_dofs) in enumerate(dofs_per_element): # Handle fakes (Space of reals) if fakes[no] and num_dofs_per_element[no] == 1: code.append(assign(component(dofs_variable, i), offset_name)) if offset_name != "0": code.append(iadd(offset_name, 1)) i += 1 continue # Generate code for each degree of freedom for each dimension for (dim, num) in enumerate(num_dofs): # Ignore if no dofs for this dimension if not num[0]: continue for (k, dofs) in enumerate(num): v = multiply([len(num[k]), component(entity_index, (dim, k))]) for (j, dof) in enumerate(dofs): value = add([offset_name, v, j]) code.append(assign(component(dofs_variable, dof+i), value)) # Update offset corresponding to mesh entity: if need_offset: addition = multiply([len(num[0]), component(num_entities_format, dim)]) code.append(iadd("offset", addition)) i += num_dofs_per_element[no] return "\n".join(code) def _tabulate_coordinates(ir): "Generate code for tabulate_coordinates." 
# Raise error if tabulate_coordinates is ill-defined if not ir: msg = "tabulate_coordinates is not defined for this element" return format["exception"](msg) # Extract formats: inner_product = format["inner product"] component = format["component"] precision = format["float"] assign = format["assign"] f_x = format["vertex_coordinates"] coordinates = format["argument coordinates"] # Extract coordinates and cell dimension gdim = ir["gdim"] tdim = ir["tdim"] # Aid mapping points from reference to physical element coefficients = affine_weights(tdim) # Generate code for each point and each component code = [] for (i, coordinate) in enumerate(ir["points"]): w = coefficients(coordinate) for j in range(gdim): # Compute physical coordinate coords = [component(f_x(), (k*gdim + j,)) for k in range(tdim + 1)] value = inner_product(w, coords) # Assign coordinate code.append(assign(component(coordinates, (i*gdim + j)), value)) return "\n".join(code) def _tabulate_entity_dofs(ir): "Generate code for tabulate_entity_dofs." 
# Extract variables from ir entity_dofs, num_dofs_per_entity = ir # Prefetch formats assign = format["assign"] component = format["component"] f_d = format["argument dimension"] f_i = format["argument entity"] dofs = format["argument dofs"] # Add check that dimension and number of mesh entities is valid dim = len(num_dofs_per_entity) excpt = format["exception"]("%s is larger than dimension (%d)" % (f_d, dim - 1)) code = [format["if"]("%s > %d" % (f_d, dim-1), excpt)] # Generate cases for each dimension: all_cases = ["" for d in range(dim)] for d in range(dim): # Ignore if no entities for this dimension if num_dofs_per_entity[d] == 0: continue # Add check that given entity is valid: num_entities = len(entity_dofs[d].keys()) excpt = format["exception"]("%s is larger than number of entities (%d)" % (f_i, num_entities - 1)) check = format["if"]("%s > %d" % (f_i, num_entities - 1), excpt) # Generate cases for each mesh entity cases = ["\n".join(assign(component(dofs, j), dof) for (j, dof) in enumerate(entity_dofs[d][entity])) for entity in range(num_entities)] # Generate inner switch with preceding check all_cases[d] = "\n".join([check, format["switch"](f_i, cases)]) # Generate outer switch code.append(format["switch"](f_d, all_cases)) return "\n".join(code) #--- Utility functions --- def _create_foo(prefix, class_name, postfix, arg, numbers=None): "Generate code for create_." 
ret = format["return"] create = format["create foo"] class_names = ["%s_%s_%d" % (prefix.lower(), class_name, i) for i in postfix] cases = [ret(create(name)) for name in class_names] default = ret(0) return format["switch"](arg, cases, default=default, numbers=numbers) def _create_finite_element(prefix, ir): f_i = format["argument sub"] return _create_foo(prefix, "finite_element", ir["create_finite_element"], f_i) def _create_dofmap(prefix, ir): f_i = format["argument sub"] return _create_foo(prefix, "dofmap", ir["create_dofmap"], f_i) def _create_sub_element(prefix, ir): f_i = format["argument sub"] return _create_foo(prefix, "finite_element", ir["create_sub_element"], f_i) def _create_sub_dofmap(prefix, ir): f_i = format["argument sub"] return _create_foo(prefix, "dofmap", ir["create_sub_dofmap"], f_i) def _create_foo_integral(ir, integral_type, prefix): "Generate code for create__integral." f_i = format["argument subdomain"] class_name = integral_type + "_integral_" + str(ir["id"]) postfix = ir["create_" + integral_type + "_integral"] return _create_foo(prefix, class_name, postfix, f_i, numbers=postfix) def _has_foo_integrals(ir, integral_type): ret = format["return"] b = format["bool"] i = ir["has_%s_integrals" % integral_type] return ret(b(i)) def _create_default_foo_integral(ir, integral_type, prefix): "Generate code for create_default__integral." ret = format["return"] postfix = ir["create_default_" + integral_type + "_integral"] if postfix is None: return ret(0) else: create = format["create foo"] class_name = integral_type + "_integral_" + str(ir["id"]) name = "%s_%s_%s" % (prefix.lower(), class_name, postfix) return ret(create(name)) def _postprocess_code(code, parameters): "Postprocess generated code." _indent_code(code) _remove_code(code, parameters) def _indent_code(code): "Indent code that should be indented." 
for key in code: if not key in ("classname", "members", "constructor_arguments", "initializer_list", "additional_includes_set", "restrict", "class_type"): code[key] = indent(code[key], 4) def _remove_code(code, parameters): "Remove code that should not be generated." for key in code: flag = "no-" + key if flag in parameters and parameters[flag]: msg = "// Function %s not generated (compiled with -f%s)" \ % (key, flag) code[key] = format["exception"](msg) ffc-1.6.0/ffc/codesnippets.py000066400000000000000000001110141255571034100160650ustar00rootroot00000000000000"Code snippets for code generation." # Copyright (C) 2007-2013 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Kristian B. 
Oelgaard 2010-2013 # Modified by Marie Rognes 2007-2012 # Modified by Peter Brune 2009 # Modified by Martin Alnaes, 2013 # # First added: 2007-02-28 # Last changed: 2014-06-10 # Code snippets __all__ = ["comment_ufc", "comment_dolfin", "header_h", "header_c", "footer", "compute_jacobian", "compute_jacobian_inverse", "eval_basis_decl", "eval_basis_init", "eval_basis", "eval_basis_copy", "eval_derivs_decl", "eval_derivs_init", "eval_derivs", "eval_derivs_copy"] __old__ = ["evaluate_f", "facet_determinant", "map_onto_physical", "fiat_coordinate_map", "transform_snippet", "scale_factor", "combinations_snippet", "normal_direction", "facet_normal", "ip_coordinates", "cell_volume", "circumradius", "facet_area", "min_facet_edge_length", "max_facet_edge_length", "orientation_snippet"] __all__ += __old__ comment_ufc = """\ // This code conforms with the UFC specification version %(ufc_version)s // and was automatically generated by FFC version %(ffc_version)s. """ comment_dolfin = """\ // This code conforms with the UFC specification version %(ufc_version)s // and was automatically generated by FFC version %(ffc_version)s. // // This code was generated with the option '-l dolfin' and // contains DOLFIN-specific wrappers that depend on DOLFIN. 
""" # Code snippets for headers and footers header_h = """\ #ifndef __%(prefix_upper)s_H #define __%(prefix_upper)s_H #include #include #include #include """ header_c = """\ #include "%(prefix)s.h" """ footer = """\ #endif """ # Code snippets for computing Jacobians _compute_jacobian_interval_1d = """\ // Compute Jacobian double J%(restriction)s[1]; compute_jacobian_interval_1d(J%(restriction)s, vertex_coordinates%(restriction)s); """ _compute_jacobian_interval_2d = """\ // Compute Jacobian double J%(restriction)s[2]; compute_jacobian_interval_2d(J%(restriction)s, vertex_coordinates%(restriction)s); """ _compute_jacobian_interval_3d = """\ // Compute Jacobian double J%(restriction)s[3]; compute_jacobian_interval_3d(J%(restriction)s, vertex_coordinates%(restriction)s); """ _compute_jacobian_triangle_2d = """\ // Compute Jacobian double J%(restriction)s[4]; compute_jacobian_triangle_2d(J%(restriction)s, vertex_coordinates%(restriction)s); """ _compute_jacobian_triangle_3d = """\ // Compute Jacobian double J%(restriction)s[6]; compute_jacobian_triangle_3d(J%(restriction)s, vertex_coordinates%(restriction)s); """ _compute_jacobian_tetrahedron_3d = """\ // Compute Jacobian double J%(restriction)s[9]; compute_jacobian_tetrahedron_3d(J%(restriction)s, vertex_coordinates%(restriction)s); """ compute_jacobian = {1: {1: _compute_jacobian_interval_1d, 2: _compute_jacobian_interval_2d, 3: _compute_jacobian_interval_3d}, 2: {2: _compute_jacobian_triangle_2d, 3: _compute_jacobian_triangle_3d}, 3: {3: _compute_jacobian_tetrahedron_3d}} # Code snippets for computing Jacobian inverses _compute_jacobian_inverse_interval_1d = """\ // Compute Jacobian inverse and determinant double K%(restriction)s[1]; double detJ%(restriction)s; compute_jacobian_inverse_interval_1d(K%(restriction)s, detJ%(restriction)s, J%(restriction)s); """ _compute_jacobian_inverse_interval_2d = """\ // Compute Jacobian inverse and determinant double K%(restriction)s[2]; double detJ%(restriction)s; 
compute_jacobian_inverse_interval_2d(K%(restriction)s, detJ%(restriction)s, J%(restriction)s); """ _compute_jacobian_inverse_interval_3d = """\ // Compute Jacobian inverse and determinant double K%(restriction)s[3]; double detJ%(restriction)s; compute_jacobian_inverse_interval_3d(K%(restriction)s, detJ%(restriction)s, J%(restriction)s); """ _compute_jacobian_inverse_triangle_2d = """\ // Compute Jacobian inverse and determinant double K%(restriction)s[4]; double detJ%(restriction)s; compute_jacobian_inverse_triangle_2d(K%(restriction)s, detJ%(restriction)s, J%(restriction)s); """ _compute_jacobian_inverse_triangle_3d = """\ // Compute Jacobian inverse and determinant double K%(restriction)s[6]; double detJ%(restriction)s; compute_jacobian_inverse_triangle_3d(K%(restriction)s, detJ%(restriction)s, J%(restriction)s); """ _compute_jacobian_inverse_tetrahedron_3d = """\ // Compute Jacobian inverse and determinant double K%(restriction)s[9]; double detJ%(restriction)s; compute_jacobian_inverse_tetrahedron_3d(K%(restriction)s, detJ%(restriction)s, J%(restriction)s); """ compute_jacobian_inverse = {1: {1: _compute_jacobian_inverse_interval_1d, 2: _compute_jacobian_inverse_interval_2d, 3: _compute_jacobian_inverse_interval_3d}, 2: {2: _compute_jacobian_inverse_triangle_2d, 3: _compute_jacobian_inverse_triangle_3d}, 3: {3: _compute_jacobian_inverse_tetrahedron_3d}} # Code snippet for scale factor scale_factor = """\ // Set scale factor const double det = std::abs(detJ);""" # FIXME: Old stuff below that should be cleaned up or moved to ufc_geometry.h orientation_snippet = """ // Check orientation if (cell_orientation%(restriction)s == -1) throw std::runtime_error("cell orientation must be defined (not -1)"); // (If cell_orientation == 1 = down, multiply det(J) by -1) else if (cell_orientation%(restriction)s == 1) detJ%(restriction)s *= -1; """ evaluate_f = "f.evaluate(vals, y, c);" _facet_determinant_1D = """\ // Facet determinant 1D (vertex) const double det = 1.0;""" 
_facet_determinant_2D = """\ // Get vertices on edge static unsigned int edge_vertices[3][2] = {{1, 2}, {0, 2}, {0, 1}}; const unsigned int v0 = edge_vertices[facet%(restriction)s][0]; const unsigned int v1 = edge_vertices[facet%(restriction)s][1]; // Compute scale factor (length of edge scaled by length of reference interval) const double dx0 = vertex_coordinates%(restriction)s[2*v1 + 0] - vertex_coordinates%(restriction)s[2*v0 + 0]; const double dx1 = vertex_coordinates%(restriction)s[2*v1 + 1] - vertex_coordinates%(restriction)s[2*v0 + 1]; const double det = std::sqrt(dx0*dx0 + dx1*dx1); """ _facet_determinant_2D_1D = """\ // Facet determinant 1D in 2D (vertex) const double det = 1.0; """ _facet_determinant_3D = """\ // Get vertices on face static unsigned int face_vertices[4][3] = {{1, 2, 3}, {0, 2, 3}, {0, 1, 3}, {0, 1, 2}}; const unsigned int v0 = face_vertices[facet%(restriction)s][0]; const unsigned int v1 = face_vertices[facet%(restriction)s][1]; const unsigned int v2 = face_vertices[facet%(restriction)s][2]; // Compute scale factor (area of face scaled by area of reference triangle) const double a0 = (vertex_coordinates%(restriction)s[3*v0 + 1]*vertex_coordinates%(restriction)s[3*v1 + 2] + vertex_coordinates%(restriction)s[3*v0 + 2]*vertex_coordinates%(restriction)s[3*v2 + 1] + vertex_coordinates%(restriction)s[3*v1 + 1]*vertex_coordinates%(restriction)s[3*v2 + 2]) - (vertex_coordinates%(restriction)s[3*v2 + 1]*vertex_coordinates%(restriction)s[3*v1 + 2] + vertex_coordinates%(restriction)s[3*v2 + 2]*vertex_coordinates%(restriction)s[3*v0 + 1] + vertex_coordinates%(restriction)s[3*v1 + 1]*vertex_coordinates%(restriction)s[3*v0 + 2]); const double a1 = (vertex_coordinates%(restriction)s[3*v0 + 2]*vertex_coordinates%(restriction)s[3*v1 + 0] + vertex_coordinates%(restriction)s[3*v0 + 0]*vertex_coordinates%(restriction)s[3*v2 + 2] + vertex_coordinates%(restriction)s[3*v1 + 2]*vertex_coordinates%(restriction)s[3*v2 + 0]) - 
(vertex_coordinates%(restriction)s[3*v2 + 2]*vertex_coordinates%(restriction)s[3*v1 + 0] + vertex_coordinates%(restriction)s[3*v2 + 0]*vertex_coordinates%(restriction)s[3*v0 + 2] + vertex_coordinates%(restriction)s[3*v1 + 2]*vertex_coordinates%(restriction)s[3*v0 + 0]); const double a2 = (vertex_coordinates%(restriction)s[3*v0 + 0]*vertex_coordinates%(restriction)s[3*v1 + 1] + vertex_coordinates%(restriction)s[3*v0 + 1]*vertex_coordinates%(restriction)s[3*v2 + 0] + vertex_coordinates%(restriction)s[3*v1 + 0]*vertex_coordinates%(restriction)s[3*v2 + 1]) - (vertex_coordinates%(restriction)s[3*v2 + 0]*vertex_coordinates%(restriction)s[3*v1 + 1] + vertex_coordinates%(restriction)s[3*v2 + 1]*vertex_coordinates%(restriction)s[3*v0 + 0] + vertex_coordinates%(restriction)s[3*v1 + 0]*vertex_coordinates%(restriction)s[3*v0 + 1]); const double det = std::sqrt(a0*a0 + a1*a1 + a2*a2); """ _facet_determinant_3D_2D = """\ // Facet determinant 2D in 3D (edge) // Get vertices on edge static unsigned int edge_vertices[3][2] = {{1, 2}, {0, 2}, {0, 1}}; const unsigned int v0 = edge_vertices[facet%(restriction)s][0]; const unsigned int v1 = edge_vertices[facet%(restriction)s][1]; // Compute scale factor (length of edge scaled by length of reference interval) const double dx0 = vertex_coordinates%(restriction)s[3*v1 + 0] - vertex_coordinates%(restriction)s[3*v0 + 0]; const double dx1 = vertex_coordinates%(restriction)s[3*v1 + 1] - vertex_coordinates%(restriction)s[3*v0 + 1]; const double dx2 = vertex_coordinates%(restriction)s[3*v1 + 2] - vertex_coordinates%(restriction)s[3*v0 + 2]; const double det = std::sqrt(dx0*dx0 + dx1*dx1 + dx2*dx2); """ _facet_determinant_3D_1D = """\ // Facet determinant 1D in 3D (vertex) const double det = 1.0; """ _normal_direction_1D = """\ const bool direction = facet%(restriction)s == 0 ? 
vertex_coordinates%(restriction)s[0] > vertex_coordinates%(restriction)s[1] : vertex_coordinates%(restriction)s[1] > vertex_coordinates%(restriction)s[0]; """ _normal_direction_2D = """\ const bool direction = dx1*(vertex_coordinates%(restriction)s[2*%(facet)s] - vertex_coordinates%(restriction)s[2*v0]) - dx0*(vertex_coordinates%(restriction)s[2*%(facet)s + 1] - vertex_coordinates%(restriction)s[2*v0 + 1]) < 0; """ _normal_direction_3D = """\ const bool direction = a0*(vertex_coordinates%(restriction)s[3*%(facet)s] - vertex_coordinates%(restriction)s[3*v0]) + a1*(vertex_coordinates%(restriction)s[3*%(facet)s + 1] - vertex_coordinates%(restriction)s[3*v0 + 1]) + a2*(vertex_coordinates%(restriction)s[3*%(facet)s + 2] - vertex_coordinates%(restriction)s[3*v0 + 2]) < 0; """ # MER: Coding all up in _facet_normal_ND_M_D for now; these are # therefore empty. _normal_direction_2D_1D = "" _normal_direction_3D_2D = "" _normal_direction_3D_1D = "" _facet_normal_1D = """ // Facet normals are 1.0 or -1.0: (-1.0) <-- X------X --> (1.0) const double n%(restriction)s = %(direction)sdirection ? 1.0 : -1.0;""" _facet_normal_2D = """\ // Compute facet normals from the facet scale factor constants const double n%(restriction)s0 = %(direction)sdirection ? dx1 / det : -dx1 / det; const double n%(restriction)s1 = %(direction)sdirection ? 
-dx0 / det : dx0 / det;""" _facet_normal_2D_1D = """ // Compute facet normal double n%(restriction)s0 = 0.0; double n%(restriction)s1 = 0.0; if (facet%(restriction)s == 0) { n%(restriction)s0 = vertex_coordinates%(restriction)s[0] - vertex_coordinates%(restriction)s[2]; n%(restriction)s1 = vertex_coordinates%(restriction)s[1] - vertex_coordinates%(restriction)s[3]; } else { n%(restriction)s0 = vertex_coordinates%(restriction)s[2] - vertex_coordinates%(restriction)s[0]; n%(restriction)s1 = vertex_coordinates%(restriction)s[3] - vertex_coordinates%(restriction)s[1]; } const double n%(restriction)s_length = std::sqrt(n%(restriction)s0*n%(restriction)s0 + n%(restriction)s1*n%(restriction)s1); n%(restriction)s0 /= n%(restriction)s_length; n%(restriction)s1 /= n%(restriction)s_length; """ _facet_normal_3D = """ const double n%(restriction)s0 = %(direction)sdirection ? a0 / det : -a0 / det; const double n%(restriction)s1 = %(direction)sdirection ? a1 / det : -a1 / det; const double n%(restriction)s2 = %(direction)sdirection ? 
a2 / det : -a2 / det;""" _facet_normal_3D_2D = """ // Compute facet normal for triangles in 3D const unsigned int vertex%(restriction)s0 = facet%(restriction)s; // Get coordinates corresponding the vertex opposite this // static unsigned int edge_vertices[3][2] = {{1, 2}, {0, 2}, {0, 1}}; const unsigned int vertex%(restriction)s1 = edge_vertices[facet%(restriction)s][0]; const unsigned int vertex%(restriction)s2 = edge_vertices[facet%(restriction)s][1]; // Define vectors n = (p2 - p0) and t = normalized (p2 - p1) double n%(restriction)s0 = vertex_coordinates%(restriction)s[3*vertex%(restriction)s2 + 0] - vertex_coordinates%(restriction)s[3*vertex%(restriction)s0 + 0]; double n%(restriction)s1 = vertex_coordinates%(restriction)s[3*vertex%(restriction)s2 + 1] - vertex_coordinates%(restriction)s[3*vertex%(restriction)s0 + 1]; double n%(restriction)s2 = vertex_coordinates%(restriction)s[3*vertex%(restriction)s2 + 2] - vertex_coordinates%(restriction)s[3*vertex%(restriction)s0 + 2]; double t%(restriction)s0 = vertex_coordinates%(restriction)s[3*vertex%(restriction)s2 + 0] - vertex_coordinates%(restriction)s[3*vertex%(restriction)s1 + 0]; double t%(restriction)s1 = vertex_coordinates%(restriction)s[3*vertex%(restriction)s2 + 1] - vertex_coordinates%(restriction)s[3*vertex%(restriction)s1 + 1]; double t%(restriction)s2 = vertex_coordinates%(restriction)s[3*vertex%(restriction)s2 + 2] - vertex_coordinates%(restriction)s[3*vertex%(restriction)s1 + 2]; const double t%(restriction)s_length = std::sqrt(t%(restriction)s0*t%(restriction)s0 + t%(restriction)s1*t%(restriction)s1 + t%(restriction)s2*t%(restriction)s2); t%(restriction)s0 /= t%(restriction)s_length; t%(restriction)s1 /= t%(restriction)s_length; t%(restriction)s2 /= t%(restriction)s_length; // Subtract, the projection of (p2 - p0) onto (p2 - p1), from (p2 - p0) const double ndott%(restriction)s = t%(restriction)s0*n%(restriction)s0 + t%(restriction)s1*n%(restriction)s1 + t%(restriction)s2*n%(restriction)s2; 
n%(restriction)s0 -= ndott%(restriction)s*t%(restriction)s0; n%(restriction)s1 -= ndott%(restriction)s*t%(restriction)s1; n%(restriction)s2 -= ndott%(restriction)s*t%(restriction)s2; const double n%(restriction)s_length = std::sqrt(n%(restriction)s0*n%(restriction)s0 + n%(restriction)s1*n%(restriction)s1 + n%(restriction)s2*n%(restriction)s2); // Normalize n%(restriction)s0 /= n%(restriction)s_length; n%(restriction)s1 /= n%(restriction)s_length; n%(restriction)s2 /= n%(restriction)s_length; """ _facet_normal_3D_1D = """ // Compute facet normal double n%(restriction)s0 = 0.0; double n%(restriction)s1 = 0.0; double n%(restriction)s2 = 0.0; if (facet%(restriction)s == 0) { n%(restriction)s0 = vertex_coordinates%(restriction)s[0] - vertex_coordinates%(restriction)s[3]; n%(restriction)s1 = vertex_coordinates%(restriction)s[1] - vertex_coordinates%(restriction)s[4]; n%(restriction)s1 = vertex_coordinates%(restriction)s[2] - vertex_coordinates%(restriction)s[5]; } else { n%(restriction)s0 = vertex_coordinates%(restriction)s[3] - vertex_coordinates%(restriction)s[0]; n%(restriction)s1 = vertex_coordinates%(restriction)s[4] - vertex_coordinates%(restriction)s[1]; n%(restriction)s1 = vertex_coordinates%(restriction)s[5] - vertex_coordinates%(restriction)s[2]; } const double n%(restriction)s_length = std::sqrt(n%(restriction)s0*n%(restriction)s0 + n%(restriction)s1*n%(restriction)s1 + n%(restriction)s2*n%(restriction)s2); n%(restriction)s0 /= n%(restriction)s_length; n%(restriction)s1 /= n%(restriction)s_length; n%(restriction)s2 /= n%(restriction)s_length; """ _cell_volume_1D = """\ // Compute cell volume const double volume%(restriction)s = std::abs(detJ%(restriction)s); """ _cell_volume_2D = """\ // Compute cell volume const double volume%(restriction)s = std::abs(detJ%(restriction)s)/2.0; """ _cell_volume_2D_1D = """\ // Compute cell volume of interval in 2D const double volume%(restriction)s = std::abs(detJ%(restriction)s); """ _cell_volume_3D = """\ // Compute cell 
volume const double volume%(restriction)s = std::abs(detJ%(restriction)s)/6.0; """ _cell_volume_3D_1D = """\ // Compute cell volume of interval in 3D const double volume%(restriction)s = std::abs(detJ%(restriction)s); """ _cell_volume_3D_2D = """\ // Compute cell volume of triangle in 3D const double volume%(restriction)s = std::abs(detJ%(restriction)s)/2.0; """ _circumradius_1D = """\ // Compute circumradius; in 1D it is equal to half the cell length const double circumradius%(restriction)s = std::abs(detJ%(restriction)s)/2.0; """ _circumradius_2D = """\ // Compute circumradius of triangle in 2D const double v1v2%(restriction)s = std::sqrt((vertex_coordinates%(restriction)s[4] - vertex_coordinates%(restriction)s[2])*(vertex_coordinates%(restriction)s[4] - vertex_coordinates%(restriction)s[2]) + (vertex_coordinates%(restriction)s[5] - vertex_coordinates%(restriction)s[3])*(vertex_coordinates%(restriction)s[5] - vertex_coordinates%(restriction)s[3]) ); const double v0v2%(restriction)s = std::sqrt(J%(restriction)s[3]*J%(restriction)s[3] + J%(restriction)s[1]*J%(restriction)s[1]); const double v0v1%(restriction)s = std::sqrt(J%(restriction)s[0]*J%(restriction)s[0] + J%(restriction)s[2]*J%(restriction)s[2]); const double circumradius%(restriction)s = 0.25*(v1v2%(restriction)s*v0v2%(restriction)s*v0v1%(restriction)s)/(volume%(restriction)s); """ _circumradius_2D_1D = """\ // Compute circumradius of interval in 3D (1/2 volume) const double circumradius%(restriction)s = std::abs(detJ%(restriction)s)/2.0; """ _circumradius_3D = """\ // Compute circumradius const double v1v2%(restriction)s = std::sqrt( (vertex_coordinates%(restriction)s[6] - vertex_coordinates%(restriction)s[3])*(vertex_coordinates%(restriction)s[6] - vertex_coordinates%(restriction)s[3]) + (vertex_coordinates%(restriction)s[7] - vertex_coordinates%(restriction)s[4])*(vertex_coordinates%(restriction)s[7] - vertex_coordinates%(restriction)s[4]) + (vertex_coordinates%(restriction)s[8] - 
vertex_coordinates%(restriction)s[5])*(vertex_coordinates%(restriction)s[8] - vertex_coordinates%(restriction)s[5]) ); const double v0v2%(restriction)s = std::sqrt(J%(restriction)s[1]*J%(restriction)s[1] + J%(restriction)s[4]*J%(restriction)s[4] + J%(restriction)s[7]*J%(restriction)s[7]); const double v0v1%(restriction)s = std::sqrt(J%(restriction)s[0]*J%(restriction)s[0] + J%(restriction)s[3]*J%(restriction)s[3] + J%(restriction)s[6]*J%(restriction)s[6]); const double v0v3%(restriction)s = std::sqrt(J%(restriction)s[2]*J%(restriction)s[2] + J%(restriction)s[5]*J%(restriction)s[5] + J%(restriction)s[8]*J%(restriction)s[8]); const double v1v3%(restriction)s = std::sqrt( (vertex_coordinates%(restriction)s[9] - vertex_coordinates%(restriction)s[3])*(vertex_coordinates%(restriction)s[9] - vertex_coordinates%(restriction)s[3]) + (vertex_coordinates%(restriction)s[10] - vertex_coordinates%(restriction)s[4])*(vertex_coordinates%(restriction)s[10] - vertex_coordinates%(restriction)s[4]) + (vertex_coordinates%(restriction)s[11] - vertex_coordinates%(restriction)s[5])*(vertex_coordinates%(restriction)s[11] - vertex_coordinates%(restriction)s[5]) ); const double v2v3%(restriction)s = std::sqrt( (vertex_coordinates%(restriction)s[9] - vertex_coordinates%(restriction)s[6])*(vertex_coordinates%(restriction)s[9] - vertex_coordinates%(restriction)s[6]) + (vertex_coordinates%(restriction)s[10] - vertex_coordinates%(restriction)s[7])*(vertex_coordinates%(restriction)s[10] - vertex_coordinates%(restriction)s[7]) + (vertex_coordinates%(restriction)s[11] - vertex_coordinates%(restriction)s[8])*(vertex_coordinates%(restriction)s[11] - vertex_coordinates%(restriction)s[8]) ); const double la%(restriction)s = v1v2%(restriction)s*v0v3%(restriction)s; const double lb%(restriction)s = v0v2%(restriction)s*v1v3%(restriction)s; const double lc%(restriction)s = v0v1%(restriction)s*v2v3%(restriction)s; const double s%(restriction)s = 0.5*(la%(restriction)s+lb%(restriction)s+lc%(restriction)s); 
const double area%(restriction)s = std::sqrt(s%(restriction)s*(s%(restriction)s-la%(restriction)s)*(s%(restriction)s-lb%(restriction)s)*(s%(restriction)s-lc%(restriction)s)); const double circumradius%(restriction)s = area%(restriction)s / ( 6.0*volume%(restriction)s ); """ _circumradius_3D_1D = """\ // Compute circumradius of interval in 3D (1/2 volume) const double circumradius%(restriction)s = std::abs(detJ%(restriction)s)/2.0; """ _circumradius_3D_2D = """\ // Compute circumradius of triangle in 3D const double v1v2%(restriction)s = std::sqrt( (vertex_coordinates%(restriction)s[6] - vertex_coordinates%(restriction)s[3])*(vertex_coordinates%(restriction)s[6] - vertex_coordinates%(restriction)s[3]) + (vertex_coordinates%(restriction)s[7] - vertex_coordinates%(restriction)s[4])*(vertex_coordinates%(restriction)s[7] - vertex_coordinates%(restriction)s[4]) + (vertex_coordinates%(restriction)s[8] - vertex_coordinates%(restriction)s[5])*(vertex_coordinates%(restriction)s[8] - vertex_coordinates%(restriction)s[5])); const double v0v2%(restriction)s = std::sqrt( J%(restriction)s[3]*J%(restriction)s[3] + J%(restriction)s[1]*J%(restriction)s[1] + J%(restriction)s[5]*J%(restriction)s[5]); const double v0v1%(restriction)s = std::sqrt( J%(restriction)s[0]*J%(restriction)s[0] + J%(restriction)s[2]*J%(restriction)s[2] + J%(restriction)s[4]*J%(restriction)s[4]); const double circumradius%(restriction)s = 0.25*(v1v2%(restriction)s*v0v2%(restriction)s*v0v1%(restriction)s)/(volume%(restriction)s); """ _facet_area_1D = """\ // Facet area (FIXME: Should this be 0.0?) 
const double facet_area = 1.0;""" _facet_area_2D = """\ // Facet area const double facet_area = det;""" _facet_area_2D_1D = """\ // Facet area const double facet_area = 1.0;""" _facet_area_3D = """\ // Facet area (divide by two because 'det' is scaled by area of reference triangle) const double facet_area = det/2.0;""" _facet_area_3D_1D = """\ // Facet area const double facet_area = 1.0;""" _facet_area_3D_2D = """\ // Facet area const double facet_area = det;""" evaluate_basis_dofmap = """\ unsigned int element = 0; unsigned int tmp = 0; for (unsigned int j = 0; j < %d; j++) { if (tmp + dofs_per_element[j] > i) { i -= tmp; element = element_types[j]; break; } else tmp += dofs_per_element[j]; }""" _min_facet_edge_length_3D = """\ // Min edge length of facet double min_facet_edge_length; compute_min_facet_edge_length_tetrahedron_3d(min_facet_edge_length, facet%(restriction)s, vertex_coordinates%(restriction)s); """ _max_facet_edge_length_3D = """\ // Max edge length of facet double max_facet_edge_length; compute_max_facet_edge_length_tetrahedron_3d(max_facet_edge_length, facet%(restriction)s, vertex_coordinates%(restriction)s); """ # FIXME: This is dead slow because of all the new calls # Used in evaluate_basis_derivatives. For second order derivatives in 2D it will # generate the combinations: [(0, 0), (0, 1), (1, 0), (1, 1)] (i.e., xx, xy, yx, yy) # which will also be the ordering of derivatives in the return value. 
# Generate all (num_derivatives x degree) combinations of derivative
# directions by counting in base `dimension`. For second order derivatives
# in 2D this yields [(0, 0), (0, 1), (1, 0), (1, 1)] (xx, xy, yx, yy),
# which is also the ordering of derivatives in the return value.
combinations_snippet = """\
// Declare two dimensional array that holds combinations of derivatives and initialise
unsigned int %(combinations)s[%(max_num_derivatives)s][%(max_degree)s];
for (unsigned int row = 0; row < %(max_num_derivatives)s; row++)
{
  for (unsigned int col = 0; col < %(max_degree)s; col++)
    %(combinations)s[row][col] = 0;
}

// Generate combinations of derivatives
for (unsigned int row = 1; row < %(num_derivatives)s; row++)
{
  for (unsigned int num = 0; num < row; num++)
  {
    for (unsigned int col = %(n)s-1; col+1 > 0; col--)
    {
      if (%(combinations)s[row][col] + 1 > %(dimension-1)s)
        %(combinations)s[row][col] = 0;
      else
      {
        %(combinations)s[row][col] += 1;
        break;
      }
    }
  }
}"""

def _transform_snippet(tdim, gdim):
    """Build the C++ snippet that constructs the derivative transformation
    matrix for a cell of topological dimension `tdim` embedded in geometric
    dimension `gdim`.

    The returned snippet is itself a %%-format template: it still contains
    escaped %%(...)s placeholders (K, transform, num_derivatives, n,
    combinations, ...) to be substituted by the caller. When tdim != gdim,
    the topological/geometric derivative counts differ, so the placeholder
    names are suffixed with "_t"/"_g" respectively.
    """
    if tdim == gdim:
        _t = ""
        _g = ""
    else:
        _t = "_t"
        _g = "_g"

    # Matricize K_ij -> {K_ij}
    # Lay out the flat pseudo-inverse Jacobian K (row-major, tdim x gdim)
    # as a C++ 2D array initializer.
    matrix = "{{" + "}, {".join([", ".join(["K[%d]" % (t*gdim + g) for g in range(gdim)]) for t in range(tdim)]) + "}};\n\n"
    snippet = """\
// Compute inverse of Jacobian
const double %%(K)s[%d][%d] = %s""" % (tdim, gdim, matrix)
    # NOTE: %% below escapes placeholders intended for the *caller's*
    # substitution; single-% placeholders (g, t, ...) are substituted here.
    snippet +="""// Declare transformation matrix
// Declare pointer to two dimensional array and initialise
double %%(transform)s[%%(max_g_deriv)s][%%(max_t_deriv)s];
for (unsigned int j = 0; j < %%(num_derivatives)s%(g)s; j++)
{
  for (unsigned int k = 0; k < %%(num_derivatives)s%(t)s; k++)
    %%(transform)s[j][k] = 1;
}

// Construct transformation matrix
for (unsigned int row = 0; row < %%(num_derivatives)s%(g)s; row++)
{
  for (unsigned int col = 0; col < %%(num_derivatives)s%(t)s; col++)
  {
    for (unsigned int k = 0; k < %%(n)s; k++)
      %%(transform)s[row][col] *= %%(K)s[%%(combinations)s%(t)s[col][k]][%%(combinations)s%(g)s[row][k]];
  }
}""" % {"t":_t, "g":_g}
    return snippet

# Codesnippets used in evaluate_dof
_map_onto_physical_1D = """\
// Evaluate basis functions for affine mapping
const double w0 = 1.0 - X_%(i)d[%(j)s][0];
const double w1 = X_%(i)d[%(j)s][0];

// Compute affine mapping y = F(X)
y[0] = w0*vertex_coordinates[0] +
w1*vertex_coordinates[1];""" _map_onto_physical_2D = """\ // Evaluate basis functions for affine mapping const double w0 = 1.0 - X_%(i)d[%(j)s][0] - X_%(i)d[%(j)s][1]; const double w1 = X_%(i)d[%(j)s][0]; const double w2 = X_%(i)d[%(j)s][1]; // Compute affine mapping y = F(X) y[0] = w0*vertex_coordinates[0] + w1*vertex_coordinates[2] + w2*vertex_coordinates[4]; y[1] = w0*vertex_coordinates[1] + w1*vertex_coordinates[3] + w2*vertex_coordinates[5];""" _map_onto_physical_2D_1D = """\ // Evaluate basis functions for affine mapping const double w0 = 1.0 - X_%(i)d[%(j)s][0]; const double w1 = X_%(i)d[%(j)s][0]; // Compute affine mapping y = F(X) y[0] = w0*vertex_coordinates[0] + w1*vertex_coordinates[2]; y[1] = w0*vertex_coordinates[1] + w1*vertex_coordinates[3];""" _map_onto_physical_3D = """\ // Evaluate basis functions for affine mapping const double w0 = 1.0 - X_%(i)d[%(j)s][0] - X_%(i)d[%(j)s][1] - X_%(i)d[%(j)s][2]; const double w1 = X_%(i)d[%(j)s][0]; const double w2 = X_%(i)d[%(j)s][1]; const double w3 = X_%(i)d[%(j)s][2]; // Compute affine mapping y = F(X) y[0] = w0*vertex_coordinates[0] + w1*vertex_coordinates[3] + w2*vertex_coordinates[6] + w3*vertex_coordinates[9]; y[1] = w0*vertex_coordinates[1] + w1*vertex_coordinates[4] + w2*vertex_coordinates[7] + w3*vertex_coordinates[10]; y[2] = w0*vertex_coordinates[2] + w1*vertex_coordinates[5] + w2*vertex_coordinates[8] + w3*vertex_coordinates[11];""" _map_onto_physical_3D_1D = """\ // Evaluate basis functions for affine mapping const double w0 = 1.0 - X_%(i)d[%(j)s][0]; const double w1 = X_%(i)d[%(j)s][0]; // Compute affine mapping y = F(X) y[0] = w0*vertex_coordinates[0] + w1*vertex_coordinates[3]; y[1] = w0*vertex_coordinates[1] + w1*vertex_coordinates[4]; y[2] = w0*vertex_coordinates[2] + w1*vertex_coordinates[5];""" _map_onto_physical_3D_2D = """\ // Evaluate basis functions for affine mapping const double w0 = 1.0 - X_%(i)d[%(j)s][0] - X_%(i)d[%(j)s][1]; const double w1 = X_%(i)d[%(j)s][0]; const double w2 = 
X_%(i)d[%(j)s][1]; // Compute affine mapping y = F(X) y[0] = w0*vertex_coordinates[0] + w1*vertex_coordinates[3] + w2*vertex_coordinates[6]; y[1] = w0*vertex_coordinates[1] + w1*vertex_coordinates[4] + w2*vertex_coordinates[7]; y[2] = w0*vertex_coordinates[2] + w1*vertex_coordinates[5] + w2*vertex_coordinates[8]; """ _ip_coordinates_1D = """\ X%(num_ip)d[0] = %(name)s[%(ip)s][0]*vertex_coordinates%(restriction)s[0] + \ %(name)s[%(ip)s][1]*vertex_coordinates%(restriction)s[1];""" _ip_coordinates_2D = """\ X%(num_ip)d[0] = %(name)s[%(ip)s][0]*vertex_coordinates%(restriction)s[0] + \ %(name)s[%(ip)s][1]*vertex_coordinates%(restriction)s[2] + %(name)s[%(ip)s][2]*vertex_coordinates%(restriction)s[4]; X%(num_ip)d[1] = %(name)s[%(ip)s][0]*vertex_coordinates%(restriction)s[1] + \ %(name)s[%(ip)s][1]*vertex_coordinates%(restriction)s[3] + %(name)s[%(ip)s][2]*vertex_coordinates%(restriction)s[5];""" _ip_coordinates_3D = """\ X%(num_ip)d[0] = %(name)s[%(ip)s][0]*vertex_coordinates%(restriction)s[0] + \ %(name)s[%(ip)s][1]*vertex_coordinates%(restriction)s[3] + \ %(name)s[%(ip)s][2]*vertex_coordinates%(restriction)s[6] + \ %(name)s[%(ip)s][3]*vertex_coordinates%(restriction)s[9]; X%(num_ip)d[1] = %(name)s[%(ip)s][0]*vertex_coordinates%(restriction)s[1] + \ %(name)s[%(ip)s][1]*vertex_coordinates%(restriction)s[4] + \ %(name)s[%(ip)s][2]*vertex_coordinates%(restriction)s[7] + \ %(name)s[%(ip)s][3]*vertex_coordinates%(restriction)s[10]; X%(num_ip)d[2] = %(name)s[%(ip)s][0]*vertex_coordinates%(restriction)s[2] + \ %(name)s[%(ip)s][1]*vertex_coordinates%(restriction)s[5] + \ %(name)s[%(ip)s][2]*vertex_coordinates%(restriction)s[8] + \ %(name)s[%(ip)s][3]*vertex_coordinates%(restriction)s[11];""" # Codesnippets used in evaluatebasis[|derivatives] _map_coordinates_FIAT_interval = """\ // Get coordinates and map to the reference (FIAT) element double X = (2.0*x[0] - vertex_coordinates[0] - vertex_coordinates[1]) / J[0];""" _map_coordinates_FIAT_interval_in_2D = """\ // Get coordinates 
and map to the reference (FIAT) element double X = 2*(std::sqrt(std::pow(x[0] - vertex_coordinates[0], 2) + std::pow(x[1] - vertex_coordinates[1], 2)) / detJ) - 1.0;""" _map_coordinates_FIAT_interval_in_3D = """\ // Get coordinates and map to the reference (FIAT) element double X = 2*(std::sqrt(std::pow(x[0] - vertex_coordinates[0], 2) + std::pow(x[1] - vertex_coordinates[1], 2) + std::pow(x[2] - vertex_coordinates[2], 2))/ detJ) - 1.0;""" _map_coordinates_FIAT_triangle = """\ // Compute constants const double C0 = vertex_coordinates[2] + vertex_coordinates[4]; const double C1 = vertex_coordinates[3] + vertex_coordinates[5]; // Get coordinates and map to the reference (FIAT) element double X = (J[1]*(C1 - 2.0*x[1]) + J[3]*(2.0*x[0] - C0)) / detJ; double Y = (J[0]*(2.0*x[1] - C1) + J[2]*(C0 - 2.0*x[0])) / detJ;""" _map_coordinates_FIAT_triangle_in_3D = """\ const double b0 = vertex_coordinates[0]; const double b1 = vertex_coordinates[1]; const double b2 = vertex_coordinates[2]; // P_FFC = J^dag (p - b), P_FIAT = 2*P_FFC - (1, 1) double X = 2*(K[0]*(x[0] - b0) + K[1]*(x[1] - b1) + K[2]*(x[2] - b2)) - 1.0; double Y = 2*(K[3]*(x[0] - b0) + K[4]*(x[1] - b1) + K[5]*(x[2] - b2)) - 1.0; """ _map_coordinates_FIAT_tetrahedron = """\ // Compute constants const double C0 = vertex_coordinates[9] + vertex_coordinates[6] + vertex_coordinates[3] - vertex_coordinates[0]; const double C1 = vertex_coordinates[10] + vertex_coordinates[7] + vertex_coordinates[4] - vertex_coordinates[1]; const double C2 = vertex_coordinates[11] + vertex_coordinates[8] + vertex_coordinates[5] - vertex_coordinates[2]; // Compute subdeterminants const double d_00 = J[4]*J[8] - J[5]*J[7]; const double d_01 = J[5]*J[6] - J[3]*J[8]; const double d_02 = J[3]*J[7] - J[4]*J[6]; const double d_10 = J[2]*J[7] - J[1]*J[8]; const double d_11 = J[0]*J[8] - J[2]*J[6]; const double d_12 = J[1]*J[6] - J[0]*J[7]; const double d_20 = J[1]*J[5] - J[2]*J[4]; const double d_21 = J[2]*J[3] - J[0]*J[5]; const double d_22 = 
J[0]*J[4] - J[1]*J[3]; // Get coordinates and map to the reference (FIAT) element double X = (d_00*(2.0*x[0] - C0) + d_10*(2.0*x[1] - C1) + d_20*(2.0*x[2] - C2)) / detJ; double Y = (d_01*(2.0*x[0] - C0) + d_11*(2.0*x[1] - C1) + d_21*(2.0*x[2] - C2)) / detJ; double Z = (d_02*(2.0*x[0] - C0) + d_12*(2.0*x[1] - C1) + d_22*(2.0*x[2] - C2)) / detJ; """ # Mappings to code snippets used by format These dictionaries accept # as keys: first the topological dimension, and second the geometric # dimension facet_determinant = {1: {1: _facet_determinant_1D, 2: _facet_determinant_2D_1D, 3: _facet_determinant_3D_1D}, 2: {2: _facet_determinant_2D, 3: _facet_determinant_3D_2D}, 3: {3: _facet_determinant_3D}} # Geometry related snippets map_onto_physical = {1: {1: _map_onto_physical_1D, 2: _map_onto_physical_2D_1D, 3: _map_onto_physical_3D_1D}, 2: {2: _map_onto_physical_2D, 3: _map_onto_physical_3D_2D}, 3: {3: _map_onto_physical_3D}} fiat_coordinate_map = {"interval": {1:_map_coordinates_FIAT_interval, 2:_map_coordinates_FIAT_interval_in_2D, 3:_map_coordinates_FIAT_interval_in_3D}, "triangle": {2:_map_coordinates_FIAT_triangle, 3: _map_coordinates_FIAT_triangle_in_3D}, "tetrahedron": {3:_map_coordinates_FIAT_tetrahedron}} transform_snippet = {"interval": {1: _transform_snippet(1, 1), 2: _transform_snippet(1, 2), 3: _transform_snippet(1, 3)}, "triangle": {2: _transform_snippet(2, 2), 3: _transform_snippet(2, 3)}, "tetrahedron": {3: _transform_snippet(3, 3)}} ip_coordinates = {1: (3, _ip_coordinates_1D), 2: (10, _ip_coordinates_2D), 3: (21, _ip_coordinates_3D)} # FIXME: Rename as in compute_jacobian _compute_foo__d normal_direction = {1: {1: _normal_direction_1D, 2: _normal_direction_2D_1D, 3: _normal_direction_3D_1D}, 2: {2: _normal_direction_2D, 3: _normal_direction_3D_2D}, 3: {3: _normal_direction_3D}} facet_normal = {1: {1: _facet_normal_1D, 2: _facet_normal_2D_1D, 3: _facet_normal_3D_1D}, 2: {2: _facet_normal_2D, 3: _facet_normal_3D_2D}, 3: {3: _facet_normal_3D}} cell_volume = 
{1: {1: _cell_volume_1D, 2: _cell_volume_2D_1D, 3: _cell_volume_3D_1D}, 2: {2: _cell_volume_2D, 3: _cell_volume_3D_2D}, 3: {3: _cell_volume_3D}} circumradius = {1: {1: _circumradius_1D, 2: _circumradius_2D_1D, 3: _circumradius_3D_1D}, 2: {2: _circumradius_2D, 3: _circumradius_3D_2D}, 3: {3: _circumradius_3D}} facet_area = {1: {1: _facet_area_1D, 2: _facet_area_2D_1D, 3: _facet_area_3D_1D}, 2: {2: _facet_area_2D, 3: _facet_area_3D_2D}, 3: {3: _facet_area_3D}} min_facet_edge_length = {3: {3: _min_facet_edge_length_3D}} max_facet_edge_length = {3: {3: _max_facet_edge_length_3D}} # Code snippets for runtime quadrature (calling evaluate_basis) eval_basis_decl = """\ std::vector > %(table_name)s(num_quadrature_points);""" eval_basis_init = """\ for (std::size_t ip = 0; ip < num_quadrature_points; ip++) %(table_name)s[ip].resize(%(table_size)s);""" eval_basis = """\ // Get current quadrature point and compute values of basis functions const double* x = quadrature_points + ip*%(gdim)s; const double* v = vertex_coordinates + %(vertex_offset)s; %(form_prefix)s_finite_element_%(element_number)s::_evaluate_basis_all(%(eval_name)s, x, v, cell_orientation);""" eval_basis_copy = """\ // Copy values to table %(table_name)s for (std::size_t i = 0; i < %(space_dim)s; i++) %(table_name)s[ip][%(table_offset)s + i] = %(eval_name)s[%(eval_stride)s*i + %(eval_offset)s];""" eval_derivs_decl = """\ std::vector > %(table_name)s(num_quadrature_points);""" eval_derivs_init = """\ for (std::size_t ip = 0; ip < num_quadrature_points; ip++) %(table_name)s[ip].resize(%(table_size)s);""" eval_derivs = """\ // Get current quadrature point and compute values of basis function derivatives const double* x = quadrature_points + ip*%(gdim)s; const double* v = vertex_coordinates + %(vertex_offset)s; %(form_prefix)s_finite_element_%(element_number)s::_evaluate_basis_derivatives_all(%(n)s, %(eval_name)s, x, v, cell_orientation);""" eval_derivs_copy = """\ // Copy values to table %(table_name)s for 
(std::size_t i = 0; i < %(space_dim)s; i++) %(table_name)s[ip][%(table_offset)s + i] = %(eval_name)s[%(eval_stride)s*i + %(eval_offset)s];""" ffc-1.6.0/ffc/compiler.py000066400000000000000000000176161255571034100152140ustar00rootroot00000000000000""" This is the compiler, acting as the main interface for compilation of forms and breaking the compilation into several sequential stages. The output of each stage is the input of the next stage. Compiler stage 0: Language, parsing ----------------------------------- Input: Python code or .ufl file Output: UFL form This stage consists of parsing and expressing a form in the UFL form language. This stage is completely handled by UFL. Compiler stage 1: Analysis -------------------------- Input: UFL form Output: Preprocessed UFL form and FormData (metadata) This stage preprocesses the UFL form and extracts form metadata. It may also perform simplifications on the form. Compiler stage 2: Code representation ------------------------------------- Input: Preprocessed UFL form and FormData (metadata) Output: Intermediate Representation (IR) This stage examines the input and generates all data needed for code generation. This includes generation of finite element basis functions, extraction of data for mapping of degrees of freedom and possible precomputation of integrals. Most of the complexity of compilation is handled in this stage. The IR is stored as a dictionary, mapping names of UFC functions to data needed for generation of the corresponding code. Compiler stage 3: Optimization ------------------------------ Input: Intermediate Representation (IR) Output: Optimized Intermediate Representation (OIR) This stage examines the IR and performs optimizations. Optimization is currently disabled as a separate stage but is implemented as part of the code generation for quadrature representation. 
Compiler stage 4: Code generation --------------------------------- Input: Optimized Intermediate Representation (OIR) Output: C++ code This stage examines the OIR and generates the actual C++ code for the body of each UFC function. The code is stored as a dictionary, mapping names of UFC functions to strings containing the C++ code of the body of each function. Compiler stage 5: Code formatting --------------------------------- Input: C++ code Output: C++ code files This stage examines the generated C++ code and formats it according to the UFC format, generating as output one or more .h/.cpp files conforming to the UFC format. The main interface is defined by the following two functions: compile_form compile_element The compiler stages are implemented by the following functions: analyze_forms or analyze_elements (stage 1) compute_ir (stage 2) optimize_ir (stage 3) generate_code (stage 4) format_code (stage 5) """ # Copyright (C) 2007-2015 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Kristian B. Oelgaard, 2010. # Modified by Dag Lindbo, 2008. # Modified by Garth N. Wells, 2009. 
# Modified by Martin Alnaes, 2013-2015

__all__ = ["compile_form", "compile_element"]

# Python modules
from time import time
import os

# FFC modules
from ffc.log import info, info_green, warning
from ffc.parameters import default_parameters

# FFC modules
from ffc.analysis import analyze_forms, analyze_elements
from ffc.representation import compute_ir
from ffc.optimization import optimize_ir
from ffc.codegeneration import generate_code
from ffc.formatting import format_code, write_code
from ffc.wrappers import generate_wrapper_code

def compile_form(forms, object_names=None, prefix="Form", parameters=None):
    """This function generates UFC code for a given UFL form or list of
    UFL forms.

    Parameters:
      forms        -- a single UFL form or a list/tuple of forms
      object_names -- optional dict passed through to wrapper generation
                      (defaults to {})
      prefix       -- basename used for the generated files; any directory
                      components are stripped with a warning
      parameters   -- FFC parameter dict; defaults to default_parameters()

    Runs the five compiler stages in order (analysis, intermediate
    representation, optimization, code generation, formatting) and writes
    the generated .h/.cpp files to disk.
    """
    info("Compiling form %s\n" % prefix)

    # Reset timing
    cpu_time_0 = time()

    # Check input arguments
    forms = _check_forms(forms)
    if not forms: return
    if prefix != os.path.basename(prefix):
        # Strip directory components from the prefix; only a basename is valid.
        prefix = os.path.basename(prefix)
        warning("Invalid prefix, modified to {}.".format(prefix))
    if object_names is None:
        object_names = {}
    parameters = _check_parameters(parameters)

    # Stage 1: analysis
    cpu_time = time()
    analysis = analyze_forms(forms, parameters)
    _print_timing(1, time() - cpu_time)

    # Stage 2: intermediate representation
    cpu_time = time()
    ir = compute_ir(analysis, parameters)
    _print_timing(2, time() - cpu_time)

    # Stage 3: optimization
    cpu_time = time()
    oir = optimize_ir(ir, parameters)
    _print_timing(3, time() - cpu_time)

    # Stage 4: code generation
    cpu_time = time()
    code = generate_code(oir, prefix, parameters)
    _print_timing(4, time() - cpu_time)

    # Stage 4.1: generate wrappers
    cpu_time = time()
    wrapper_code = generate_wrapper_code(analysis, prefix, object_names, parameters)
    _print_timing(4.1, time() - cpu_time)

    # Stage 5: format code
    cpu_time = time()
    code_h, code_c = format_code(code, wrapper_code, prefix, parameters)
    write_code(code_h, code_c, prefix, parameters) # FIXME: Don't write to file in this function (issue #72)
    _print_timing(5, time() - cpu_time)

    info_green("FFC finished in %g seconds.", time() - cpu_time_0)

def compile_element(elements, prefix="Element", parameters=None):
    """This function generates UFC code for a given UFL element or list
    of UFL elements.

    Mirrors compile_form but starts from analyze_elements and passes an
    empty object_names dict to the wrapper generator.
    """
    info("Compiling element %s\n" % prefix)

    # Reset timing
    cpu_time_0 = time()

    # Check input arguments
    elements = _check_elements(elements)
    if not elements: return
    parameters = _check_parameters(parameters)

    # Stage 1: analysis
    cpu_time = time()
    analysis = analyze_elements(elements, parameters)
    _print_timing(1, time() - cpu_time)

    # Stage 2: intermediate representation
    cpu_time = time()
    ir = compute_ir(analysis, parameters)
    _print_timing(2, time() - cpu_time)

    # Stage 3: optimization
    cpu_time = time()
    oir = optimize_ir(ir, parameters)
    _print_timing(3, time() - cpu_time)

    # Stage 4: code generation
    cpu_time = time()
    code = generate_code(oir, prefix, parameters)
    _print_timing(4, time() - cpu_time)

    # Stage 4.1: generate wrappers
    cpu_time = time()
    object_names = {}
    wrapper_code = generate_wrapper_code(analysis, prefix, object_names, parameters)
    _print_timing(4.1, time() - cpu_time)

    # Stage 5: format code
    cpu_time = time()
    code_h, code_c = format_code(code, wrapper_code, prefix, parameters)
    write_code(code_h, code_c, prefix, parameters) # FIXME: Don't write to file in this function (issue #72)
    _print_timing(5, time() - cpu_time)

    info_green("FFC finished in %g seconds.", time() - cpu_time_0)

def _check_forms(forms):
    "Initial check of forms."
    # Normalize a single form to a one-element tuple.
    if not isinstance(forms, (list, tuple)):
        forms = (forms,)
    return forms

def _check_elements(elements):
    "Initial check of elements."
    # Normalize a single element to a one-element tuple.
    if not isinstance(elements, (list, tuple)):
        elements = (elements,)
    return elements

def _check_parameters(parameters):
    "Initial check of parameters."
if parameters is None: parameters = default_parameters() if "blas" in parameters: warning("BLAS mode unavailable (will return in a future version).") if "quadrature_points" in parameters: warning("Option 'quadrature_points' has been replaced by 'quadrature_degree'.") return parameters def _print_timing(stage, timing): "Print timing results." info("Compiler stage %s finished in %g seconds.\n" % (str(stage), timing)) ffc-1.6.0/ffc/cpp.py000066400000000000000000001001071255571034100141500ustar00rootroot00000000000000"This module defines rules and algorithms for generating C++ code." # Copyright (C) 2009-2015 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Kristian B. Oelgaard 2011 # Modified by Marie E. Rognes 2010 # Modified by Martin Alnaes 2013-2015 # Python modules import re, numpy, platform # FFC modules from ffc.log import debug, error from six.moves import zip # Mapping of restrictions _fixed_map = {None: "", "+": "_0", "-": "_1"} _choose_map = lambda r: _fixed_map[r] if r in _fixed_map else "_%s" % str(r) # FIXME: MSA: Using a dict to collect functions in a namespace is weird # and makes the code harder to follow, change to a class # with member functions instead! # FIXME: KBO: format is a builtin_function, i.e., we should use a different name. 
# Formatting rules format = {} # Program flow format.update({ "return": lambda v: "return %s;" % str(v), "grouping": lambda v: "(%s)" % v, "block": lambda v: "{%s}" % v, "block begin": "{", "block end": "}", "list": lambda v: format["block"](format["list separator"].join([str(l) for l in v])), "switch": lambda v, cases, default=None, numbers=None: _generate_switch(v, cases, default, numbers), "exception": lambda v: "throw std::runtime_error(\"%s\");" % v, "warning": lambda v: 'std::cerr << "*** FFC warning: " << "%s" << std::endl;' % v, "comment": lambda v: "// %s" % v, "if": lambda c, v: "if (%s)\n{\n%s\n}\n" % (c, v), "loop": lambda i, j, k: "for (unsigned int %s = %s; %s < %s; %s++)"% (i, j, i, k, i), "generate loop": lambda v, w, _indent=0: _generate_loop(v, w, _indent), "is equal": " == ", "not equal": " != ", "less than": " < ", "greater than": " > ", "less equal": " <= ", "greater equal": " >= ", "and": " && ", "or": " || ", "not": lambda v: "!(%s)" % v, "do nothing": "// Do nothing" }) # Declarations format.update({ "declaration": lambda t, n, v=None: _declaration(t, n, v), "float declaration": "double", "int declaration": "int", "uint declaration": "unsigned int", "static const uint declaration": "static const unsigned int", "static const float declaration": "static const double", "vector table declaration": "std::vector< std::vector >", "double array declaration": "double*", "const double array declaration": "const double*", "const float declaration": lambda v, w: "const double %s = %s;" % (v, w), "const uint declaration": lambda v, w: "const unsigned int %s = %s;" % (v, w), "dynamic array": lambda t, n, s: "%s *%s = new %s[%s];" % (t, n, t, s), "static array": lambda t, n, s: "static %s %s[%d];" % (t, n, s), "fixed array": lambda t, n, s: "%s %s[%d];" % (t, n, s), "delete dynamic array": lambda n, s=None: _delete_array(n, s), "create foo": lambda v: "new %s()" % v }) # Mathematical operators format.update({ "add": lambda v: " + ".join(v), "iadd": lambda 
v, w: "%s += %s;" % (str(v), str(w)), "sub": lambda v: " - ".join(v), "neg": lambda v: "-%s" % v, "mul": lambda v: "*".join(v), "imul": lambda v, w: "%s *= %s;" % (str(v), str(w)), "div": lambda v, w: "%s/%s" % (str(v), str(w)), "inverse": lambda v: "(1.0/%s)" % v, "std power": lambda base, exp: "std::pow(%s, %s)" % (base, exp), "exp": lambda v: "std::exp(%s)" % str(v), "ln": lambda v: "std::log(%s)" % str(v), "cos": lambda v: "std::cos(%s)" % str(v), "sin": lambda v: "std::sin(%s)" % str(v), "tan": lambda v: "std::tan(%s)" % str(v), "cosh": lambda v: "std::cosh(%s)" % str(v), "sinh": lambda v: "std::sinh(%s)" % str(v), "tanh": lambda v: "std::tanh(%s)" % str(v), "acos": lambda v: "std::acos(%s)" % str(v), "asin": lambda v: "std::asin(%s)" % str(v), "atan": lambda v: "std::atan(%s)" % str(v), "atan_2": lambda v1,v2: "std::atan2(%s,%s)" % (str(v1),str(v2)), "erf": lambda v: "erf(%s)" % str(v), "bessel_i": lambda v, n: "boost::math::cyl_bessel_i(%s, %s)" % (str(n), str(v)), "bessel_j": lambda v, n: "boost::math::cyl_bessel_j(%s, %s)" % (str(n), str(v)), "bessel_k": lambda v, n: "boost::math::cyl_bessel_k(%s, %s)" % (str(n), str(v)), "bessel_y": lambda v, n: "boost::math::cyl_neumann(%s, %s)" % (str(n), str(v)), "absolute value": lambda v: "std::abs(%s)" % str(v), "min value": lambda l, r: "std::min(%s, %s)" % (str(l), str(r)), "max value": lambda l, r: "std::max(%s, %s)" % (str(l), str(r)), "sqrt": lambda v: "std::sqrt(%s)" % str(v), "addition": lambda v: _add(v), "multiply": lambda v: _multiply(v), "power": lambda base, exp: _power(base, exp), "inner product": lambda v, w: _inner_product(v, w), "assign": lambda v, w: "%s = %s;" % (v, str(w)), "component": lambda v, k: _component(v, k) }) # Formatting used in tabulate_tensor format.update({ "geometry tensor": lambda j, a: "G%d_%s" % (j, "_".join(["%d" % i for i in a])) }) # Geometry related variable names (from code snippets). 
format.update({ "entity index": "c.entity_indices", "num entities": "num_global_entities", "cell": lambda s: "ufc::%s" % s, "J": lambda i, j, m, n: "J[%d]" % _flatten(i, j, m, n), "inv(J)": lambda i, j, m, n: "K[%d]" % _flatten(i, j, m, n), "det(J)": lambda r=None: "detJ%s" % _choose_map(r), "cell volume": lambda r=None: "volume%s" % _choose_map(r), "circumradius": lambda r=None: "circumradius%s" % _choose_map(r), "facet area": "facet_area", "min facet edge length": lambda r: "min_facet_edge_length", "max facet edge length": lambda r: "max_facet_edge_length", "scale factor": "det", "transform": lambda t, i, j, m, n, r: _transform(t, i, j, m, n, r), "normal component": lambda r, j: "n%s%s" % (_choose_map(r), j), "x coordinate": "X", "y coordinate": "Y", "z coordinate": "Z", "ip coordinates": lambda i, j: "X%d[%d]" % (i, j), "affine map table": lambda i, j: "FEA%d_f%d" % (i, j), "vertex_coordinates": lambda r=None: "vertex_coordinates%s" % _choose_map(r) }) # UFC function arguments and class members (names) format.update({ "element tensor": lambda i: "A[%s]" % i, "element tensor term": lambda i, j: "A%d[%s]" % (j, i), "coefficient": lambda j, k: format["component"]("w", [j, k]), "argument basis num": "i", "argument derivative order": "n", "argument values": "values", "argument coordinates": "dof_coordinates", "facet": lambda r: "facet%s" % _choose_map(r), "vertex": "vertex", "argument axis": "i", "argument dimension": "d", "argument entity": "i", "member global dimension": "_global_dimension", "argument dofs": "dofs", "argument dof num": "i", "argument dof values": "dof_values", "argument vertex values": "vertex_values", "argument sub": "i", # sub element "argument subdomain": "subdomain_id", # sub domain }) # Formatting used in evaluatedof. 
format.update({ "dof vals": "vals", "dof result": "result", "dof X": lambda i: "X_%d" % i, "dof D": lambda i: "D_%d" % i, "dof W": lambda i: "W_%d" % i, "dof copy": lambda i: "copy_%d" % i, "dof physical coordinates": "y" }) # Formatting used in evaluate_basis, evaluate_basis_derivatives and quadrature # code generators. format.update({ # evaluate_basis and evaluate_basis_derivatives "tmp value": lambda i: "tmp%d" % i, "tmp ref value": lambda i: "tmp_ref%d" % i, "local dof": "dof", "basisvalues": "basisvalues", "coefficients": lambda i: "coefficients%d" %(i), "num derivatives": lambda t_or_g :"num_derivatives" + t_or_g, "derivative combinations": lambda t_or_g :"combinations" + t_or_g, "transform matrix": "transform", "transform Jinv": "Jinv", "dmats": lambda i: "dmats%s" %(i), "dmats old": "dmats_old", "reference derivatives": "derivatives", "dof values": "dof_values", "dof map if": lambda i,j: "%d <= %s && %s <= %d"\ % (i, format["argument basis num"], format["argument basis num"], j), "dereference pointer": lambda n: "*%s" % n, "reference variable": lambda n: "&%s" % n, "call basis": lambda i, s: "_evaluate_basis(%s, %s, x, vertex_coordinates, cell_orientation);" % (i, s), "call basis_all": "_evaluate_basis_all(values, x, vertex_coordinates, cell_orientation);", "call basis_derivatives": lambda i, s: "_evaluate_basis_derivatives(%s, n, %s, x, vertex_coordinates, cell_orientation);" % (i, s), "call basis_derivatives_all": lambda i, s: "_evaluate_basis_derivatives_all(n, %s, x, vertex_coordinates, cell_orientation);" % s, # quadrature code generators "integration points": "ip", "first free index": "j", "second free index": "k", "geometry constant": lambda i: "G[%d]" % i, "ip constant": lambda i: "I[%d]" % i, "basis constant": lambda i: "B[%d]" % i, "conditional": lambda i: "C[%d]" % i, "evaluate conditional": lambda i,j,k: "(%s) ? 
%s : %s" % (i,j,k), # "geometry constant": lambda i: "G%d" % i, # "ip constant": lambda i: "I%d" % i, # "basis constant": lambda i: "B%d" % i, "function value": lambda i: "F%d" % i, "nonzero columns": lambda i: "nzc%d" % i, "weight": lambda i: "W" if i is None else "W%d" % (i), "psi name": lambda c, et, e, co, d, a: _generate_psi_name(c, et, e, co, d, a), # both "free indices": ["r","s","t","u"], "matrix index": lambda i, j, range_j: _matrix_index(i, str(j), str(range_j)), "quadrature point": lambda i, gdim: "quadrature_points + %s*%d" % (i, gdim), "facet_normal_custom": lambda gdim: _generate_facet_normal_custom(gdim), }) # Misc format.update({ "bool": lambda v: {True: "true", False: "false"}[v], "str": lambda v: "%s" % v, "int": lambda v: "%d" % v, "list separator": ", ", "block separator": ",\n", "new line": "\\\n", "tabulate tensor": lambda m: _tabulate_tensor(m), }) # Code snippets from ffc.codesnippets import * format.update({ "compute_jacobian": lambda tdim, gdim, r=None: \ compute_jacobian[tdim][gdim] % {"restriction": _choose_map(r)}, "compute_jacobian_inverse": lambda tdim, gdim, r=None: \ compute_jacobian_inverse[tdim][gdim] % {"restriction": _choose_map(r)}, "orientation": lambda tdim, gdim, r=None: orientation_snippet % {"restriction": _choose_map(r)} if tdim != gdim else "", "facet determinant": lambda tdim, gdim, r=None: facet_determinant[tdim][gdim] % {"restriction": _choose_map(r)}, "fiat coordinate map": lambda cell, gdim: fiat_coordinate_map[cell][gdim], "generate normal": lambda tdim, gdim, i: _generate_normal(tdim, gdim, i), "generate cell volume": lambda tdim, gdim, i, r=None: _generate_cell_volume(tdim, gdim, i, r), "generate circumradius": lambda tdim, gdim, i, r=None: _generate_circumradius(tdim, gdim, i, r), "generate facet area": lambda tdim, gdim: facet_area[tdim][gdim], "generate min facet edge length": lambda tdim, gdim, r=None: min_facet_edge_length[tdim][gdim] % {"restriction": _choose_map(r)}, "generate max facet edge length": 
lambda tdim, gdim, r=None: max_facet_edge_length[tdim][gdim] % {"restriction": _choose_map(r)}, "generate ip coordinates": lambda g, num_ip, name, ip, r=None: (ip_coordinates[g][0], ip_coordinates[g][1] % \ {"restriction": _choose_map(r), "ip": ip, "name": name, "num_ip": num_ip}), "scale factor snippet": scale_factor, "map onto physical": map_onto_physical, "evaluate basis snippet": eval_basis, "combinations": combinations_snippet, "transform snippet": transform_snippet, "evaluate function": evaluate_f, "ufc comment": comment_ufc, "dolfin comment": comment_dolfin, "header_h": header_h, "header_c": header_c, "footer": footer, "eval_basis_decl": eval_basis_decl, "eval_basis_init": eval_basis_init, "eval_basis": eval_basis, "eval_basis_copy": eval_basis_copy, "eval_derivs_decl": eval_derivs_decl, "eval_derivs_init": eval_derivs_init, "eval_derivs": eval_derivs, "eval_derivs_copy": eval_derivs_copy, "extract_cell_coordinates": lambda offset, r : "const double* vertex_coordinates_%d = vertex_coordinates + %d;" % (r, offset) }) # Class names format.update({ "classname finite_element": lambda prefix, i:\ "%s_finite_element_%d" % (prefix.lower(), i), "classname dofmap": lambda prefix, i: "%s_dofmap_%d" % (prefix.lower(), i), "classname cell_integral": lambda prefix, form_id, sub_domain:\ "%s_cell_integral_%d_%s" % (prefix.lower(), form_id, sub_domain), "classname exterior_facet_integral": lambda prefix, form_id, sub_domain:\ "%s_exterior_facet_integral_%d_%s" % (prefix.lower(), form_id, sub_domain), "classname interior_facet_integral": lambda prefix, form_id, sub_domain:\ "%s_interior_facet_integral_%d_%s" % (prefix.lower(), form_id, sub_domain), "classname vertex_integral": lambda prefix, form_id, sub_domain:\ "%s_vertex_integral_%d_%s" % (prefix.lower(), form_id, sub_domain), "classname custom_integral": lambda prefix, form_id, sub_domain:\ "%s_custom_integral_%d_%s" % (prefix.lower(), form_id, sub_domain), "classname form": lambda prefix, i: "%s_form_%d" % 
(prefix.lower(), i) }) # Helper functions for formatting def _declaration(type, name, value=None): if value is None: return "%s %s;" % (type, name); return "%s %s = %s;" % (type, name, str(value)); def _component(var, k): if not isinstance(k, (list, tuple)): k = [k] return "%s" % var + "".join("[%s]" % str(i) for i in k) def _delete_array(name, size=None): if size is None: return "delete [] %s;" % name f_r = format["free indices"][0] code = format["generate loop"](["delete [] %s;" % format["component"](name, f_r)], [(f_r, 0, size)]) code.append("delete [] %s;" % name) return "\n".join(code) def _multiply(factors): """ Generate string multiplying a list of numbers or strings. If a factor is zero, the whole product is zero. Any factors equal to one are ignored. """ # FIXME: This could probably be way more robust and elegant. cpp_str = format["str"] non_zero_factors = [] for f in factors: # Round-off if f is smaller than epsilon if isinstance(f, (int, float)): if abs(f) < format["epsilon"]: return cpp_str(0) if abs(f - 1.0) < format["epsilon"]: continue # Convert to string f = cpp_str(f) # Return zero if any factor is zero if f == "0": return cpp_str(0) # If f is 1, don't add it to list of factors if f == "1": continue # If sum-like, parentheseze factor if "+" in f or "-" in f: f = "(%s)" % f non_zero_factors += [f] if len(non_zero_factors) == 0: return cpp_str(1.0) return "*".join(non_zero_factors) def _add(terms): "Generate string summing a list of strings." # FIXME: Subtract absolute value of negative numbers result = " + ".join([str(t) for t in terms if (str(t) != "0")]) if result == "": return format["str"](0) return result def _power(base, exponent): "Generate code for base^exponent." if exponent >= 0: return _multiply(exponent*(base,)) else: return "1.0 / (%s)" % _power(base, -exponent) def _inner_product(v, w): "Generate string for v[0]*w[0] + ... + v[n]*w[n]." # Check that v and w have same length assert(len(v) == len(w)), "Sizes differ in inner-product!" 
# Special case, zero terms if len(v) == 0: return format["float"](0) # Straightforward handling when we only have strings if isinstance(v[0], str): return _add([_multiply([v[i], w[i]]) for i in range(len(v))]) # Fancy handling of negative numbers etc result = None eps = format["epsilon"] add = format["add"] sub = format["sub"] neg = format["neg"] mul = format["mul"] fl = format["float"] for (c, x) in zip(v, w): if result: if abs(c - 1.0) < eps: result = add([result, x]) elif abs(c + 1.0) < eps: result = sub([result, x]) elif c > eps: result = add([result, mul([fl(c), x])]) elif c < -eps: result = sub([result, mul([fl(-c), x])]) else: if abs(c - 1.0) < eps: result = x elif abs(c + 1.0) < eps: result = neg(x) elif c > eps: result = mul([fl(c), x]) elif c < -eps: result = neg(mul([fl(-c), x])) return result def _transform(type, i, j, m, n, r): map_name = {"J": "J", "JINV": "K"}[type] + _choose_map(r) return (map_name + "[%d]") % _flatten(i, j, m, n) # FIXME: Input to _generate_switch should be a list of tuples (i, case) def _generate_switch(variable, cases, default=None, numbers=None): "Generate switch statement from given variable and cases" # Special case: no cases and no default if len(cases) == 0 and default is None: return format["do nothing"] elif len(cases) == 0: return default # Special case: one case and no default if len(cases) == 1 and default is None: return cases[0] # Create numbers for switch if numbers is None: numbers = list(range(len(cases))) # Create switch code = "switch (%s)\n{\n" % variable for (i, case) in enumerate(cases): code += "case %d:\n {\n %s\n break;\n }\n" % (numbers[i], indent(case, 2)) code += "}\n" # Default value if default: code += "\n" + default return code def _tabulate_tensor(vals): "Tabulate a multidimensional tensor. (Replace tabulate_matrix and tabulate_vector)." 
# Prefetch formats to speed up code generation f_block = format["block"] f_list_sep = format["list separator"] f_block_sep = format["block separator"] # FIXME: KBO: Change this to "float" once issue in set_float_formatting is fixed. f_float = format["floating point"] f_epsilon = format["epsilon"] # Create numpy array and get shape. tensor = numpy.array(vals) shape = numpy.shape(tensor) if len(shape) == 1: # Create zeros if value is smaller than tolerance. values = [] for v in tensor: if abs(v) < f_epsilon: values.append(f_float(0.0)) else: values.append(f_float(v)) # Format values. return f_block(f_list_sep.join(values)) elif len(shape) > 1: return f_block(f_block_sep.join([_tabulate_tensor(tensor[i]) for i in range(shape[0])])) else: error("Not an N-dimensional array:\n%s" % tensor) def _generate_loop(lines, loop_vars, _indent): "This function generates a loop over a vector or matrix." # Prefetch formats to speed up code generation. f_loop = format["loop"] f_begin = format["block begin"] f_end = format["block end"] f_comment = format["comment"] if not loop_vars: return lines code = [] for ls in loop_vars: # Get index and lower and upper bounds. index, lower, upper = ls # Loop index. code.append(indent(f_loop(index, lower, upper), _indent)) code.append(indent(f_begin, _indent)) # Increase indentation. _indent += 2 # If this is the last loop, write values. if index == loop_vars[-1][0]: for l in lines: code.append(indent(l, _indent)) # Decrease indentation and write end blocks. 
indices = [var[0] for var in loop_vars] indices.reverse() for index in indices: _indent -= 2 code.append(indent(f_end + " " + f_comment("end loop over '%s'" % index), _indent)) return code def _matrix_index(i, j, range_j): "Map the indices in a matrix to an index in an array i.e., m[i][j] -> a[i*range(j)+j]" if i == 0: access = j elif i == 1: access = format["add"]([range_j, j]) else: irj = format["mul"]([format["str"](i), range_j]) access = format["add"]([irj, j]) return access def _generate_psi_name(counter, entity_type, entity, component, derivatives, avg): """Generate a name for the psi table of the form: FE#_f#_v#_C#_D###_A#, where '#' will be an integer value. FE - is a simple counter to distinguish the various bases, it will be assigned in an arbitrary fashion. f - denotes facets if applicable, range(element.num_facets()). v - denotes vertices if applicable, range(num_vertices). C - is the component number if any (flattened in the case of tensor valued functions) D - is the number of derivatives in each spatial direction if any. If the element is defined in 3D, then D012 means d^3(*)/dydz^2. 
A - denotes averaged over cell (AC) or facet (AF) """ name = "FE%d" % counter if entity_type == "facet": if entity is None: name += "_f0" else: name += "_f%d" % entity elif entity_type == "vertex": name += "_v%d" % entity if component != () and component != []: name += "_C%d" % component if any(derivatives): name += "_D" + "".join(map(str,derivatives)) if avg == "cell": name += "_AC" elif avg == "facet": name += "_AF" return name def _generate_normal(tdim, gdim, integral_type, reference_normal=False): "Generate code for computing normal" # Choose snippets direction = normal_direction[tdim][gdim] assert (gdim in facet_normal[tdim]),\ "Facet normal not yet implemented for this tdim/gdim combo" normal = facet_normal[tdim][gdim] # Choose restrictions if integral_type == "exterior_facet": code = direction % {"restriction": "", "facet" : "facet"} code += normal % {"direction" : "", "restriction": ""} elif integral_type == "interior_facet": code = direction % {"restriction": _choose_map("+"), "facet": "facet_0"} code += normal % {"direction" : "", "restriction": _choose_map("+")} code += normal % {"direction" : "!", "restriction": _choose_map("-")} else: error("Unsupported integral_type: %s" % str(integral_type)) return code def _generate_facet_normal_custom(gdim): "Generate code for setting facet normal in custom integrals" code = format["comment"]("Set facet normal components for current quadrature point\n") for i in range(gdim): code += "const double n_0%d = facet_normals[%d*ip + %d];\n" % (i, gdim, i) code += "const double n_1%d = - facet_normals[%d*ip + %d];\n" % (i, gdim, i) return code def _generate_cell_volume(tdim, gdim, integral_type, r=None): "Generate code for computing cell volume." 
# Choose snippets volume = cell_volume[tdim][gdim] # Choose restrictions if integral_type in ("cell", "exterior_facet"): code = volume % {"restriction": ""} elif integral_type == "interior_facet": code = volume % {"restriction": _choose_map("+")} code += volume % {"restriction": _choose_map("-")} elif integral_type == "custom": code = volume % {"restriction": _choose_map(r)} else: error("Unsupported integral_type: %s" % str(integral_type)) return code def _generate_circumradius(tdim, gdim, integral_type, r=None): "Generate code for computing a cell's circumradius." # Choose snippets radius = circumradius[tdim][gdim] # Choose restrictions if integral_type in ("cell", "exterior_facet", "vertex"): code = radius % {"restriction": ""} elif integral_type == "interior_facet": code = radius % {"restriction": _choose_map("+")} code += radius % {"restriction": _choose_map("-")} elif integral_type == "custom": code = radius % {"restriction": _choose_map(r)} else: error("Unsupported integral_type: %s" % str(integral_type)) return code def _flatten(i, j, m, n): return i*n + j # Other functions def indent(block, num_spaces): "Indent each row of the given string block with n spaces." indentation = " " * num_spaces return indentation + ("\n" + indentation).join(block.split("\n")) def count_ops(code): "Count the number of operations in code (multiply-add pairs)." num_add = code.count(" + ") + code.count(" - ") num_multiply = code.count("*") + code.count("/") return (num_add + num_multiply) // 2 def set_float_formatting(precision): "Set floating point formatting based on precision." 
# Options for float formatting #f1 = "%%.%df" % precision #f2 = "%%.%de" % precision f1 = "%%.%dg" % precision f2 = "%%.%dg" % precision f_int = "%%.%df" % 1 eps = eval("1e-%s" % precision) # Regular float formatting def floating_point_regular(v): if abs(v - round(v, 1)) < eps: return f_int % v elif abs(v) < 100.0: return f1 % v else: return f2 % v # Special float formatting on Windows (remove extra leading zero) def floating_point_windows(v): return floating_point_regular(v).replace("e-0", "e-").replace("e+0", "e+") # Set float formatting if platform.system() == "Windows": format["float"] = floating_point_windows else: format["float"] = floating_point_regular # FIXME: KBO: Remove once we agree on the format of 'f1' format["floating point"] = format["float"] # Set machine precision format["epsilon"] = 10.0*eval("1e-%s" % precision) def set_exception_handling(convert_exceptions_to_warnings): "Set handling of exceptions." if convert_exceptions_to_warnings: format["exception"] = format["warning"] # Declarations to examine types = [["double"], ["const", "double"], ["const", "double", "*", "const", "*"], ["int"], ["const", "int"], ["unsigned", "int"], ["bool"], ["const", "bool"], ["static", "unsigned", "int"], ["const", "unsigned", "int"]] # Special characters and delimiters special_characters = ["+", "-", "*", "/", "=", ".", " ", ";", "(", ")", "\\", "{", "}", "[","]", "!"] def remove_unused(code, used_set=set()): """ Remove unused variables from a given C++ code. This is useful when generating code that will be compiled with gcc and parameters -Wall -Werror, in which case gcc returns an error when seeing a variable declaration for a variable that is never used. Optionally, a set may be specified to indicate a set of variables names that are known to be used a priori. 
""" # Dictionary of (declaration_line, used_lines) for variables variables = {} # List of variable names (so we can search them in order) variable_names = [] lines = code.split("\n") for (line_number, line) in enumerate(lines): # Exclude commented lines. if line[:2] == "//" or line[:3] == "///": continue # Split words words = [word for word in line.split(" ") if not word == ""] # Remember line where variable is declared for type in [type for type in types if " ".join(type) in " ".join(words)]: # Fewer matches than line below. # for type in [type for type in types if len(words) > len(type)]: variable_type = words[0:len(type)] variable_name = words[len(type)] # Skip special characters if variable_name in special_characters: continue # Test if any of the special characters are present in the variable name # If this is the case, then remove these by assuming that the 'real' name # is the first entry in the return list. This is implemented to prevent # removal of e.g. 'double array[6]' if it is later used in a loop as 'array[i]' if variable_type == type: # Create correct variable name (e.g. 
y instead of # y[2]) for variables with separators seps_present = [sep for sep in special_characters if sep in variable_name] if seps_present: variable_name = [variable_name.split(sep)[0] for sep in seps_present] variable_name.sort() variable_name = variable_name[0] variables[variable_name] = (line_number, []) if not variable_name in variable_names: variable_names += [variable_name] # Mark line for used variables for variable_name in variables: (declaration_line, used_lines) = variables[variable_name] if _variable_in_line(variable_name, line) and line_number > declaration_line: variables[variable_name] = (declaration_line, used_lines + [line_number]) # Reverse the order of the variable names to catch variables used # only by variables that are removed variable_names.reverse() # Remove declarations that are not used removed_lines = [] for variable_name in variable_names: (declaration_line, used_lines) = variables[variable_name] for line in removed_lines: if line in used_lines: used_lines.remove(line) if not used_lines and not variable_name in used_set: debug("Removing unused variable: %s" % variable_name) lines[declaration_line] = None # KBO: Need to completely remove line for evaluate_basis* to work # lines[declaration_line] = "// " + lines[declaration_line] removed_lines += [declaration_line] return "\n".join([line for line in lines if not line is None]) def _variable_in_line(variable_name, line): "Check if variable name is used in line" if not variable_name in line: return False for character in special_characters: line = line.replace(character, "\\" + character) delimiter = "[" + ",".join(["\\" + c for c in special_characters]) + "]" return not re.search(delimiter + variable_name + delimiter, line) == None ffc-1.6.0/ffc/enrichedelement.py000066400000000000000000000065561255571034100165360ustar00rootroot00000000000000# Copyright (C) 2010 Marie E. Rognes # # This file is part of FFC. 
# # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2010-03-07 # Last changed: 2010-03-07 import numpy from .utils import pick_first from .mixedelement import _combine_entity_dofs, _num_components class EnrichedElement: "Create the space spanned by a list of ffc elements." def __init__(self, elements): self._elements = elements self._entity_dofs = _combine_entity_dofs(elements) def elements(self): return self._elements def space_dimension(self): return sum(e.space_dimension() for e in self._elements) def value_shape(self): return pick_first([e.value_shape() for e in self._elements]) def degree(self): return max(e.degree() for e in self._elements) def entity_dofs(self): return self._entity_dofs def mapping(self): return [m for e in self._elements for m in e.mapping()] def dual_basis(self): # NOTE: dual basis is not sum of subelements basis; it needs to be # recomputed so that \psi_j(\phi_i) = \delta_{ij} for # \phi_i basis functions and \psi_j dual basis functions return [None for e in self._elements for L in e.dual_basis()] def tabulate(self, order, points): num_components = _num_components(self) table_shape = (self.space_dimension(), num_components, len(points)) table = {} irange = (0, 0) for element in self._elements: etable = element.tabulate(order, points) irange = (irange[1], irange[1] + element.space_dimension()) # Insert element table into table for dtuple in etable.keys(): if not dtuple in table: if 
num_components == 1: table[dtuple] = numpy.zeros((self.space_dimension(), len(points))) else: table[dtuple] = numpy.zeros(table_shape) table[dtuple][irange[0]:irange[1]][:] = etable[dtuple] return table class SpaceOfReals: def __init__(self, element): self._element = element self._entity_dofs = element.entity_dofs() def space_dimension(self): return 1 def value_shape(self): return () def degree(self): return 0 def entity_dofs(self): return self._entity_dofs def mapping(self): return ["affine"] def dual_basis(self): return self._element.dual_basis() def tabulate(self, order, points): return self._element.tabulate(order, points) def get_coeffs(self): return self._element.get_coeffs() def dmats(self): return self._element.dmats() def get_num_members(self, arg): return self._element.get_num_members(arg) ffc-1.6.0/ffc/errorcontrol/000077500000000000000000000000001255571034100155475ustar00rootroot00000000000000ffc-1.6.0/ffc/errorcontrol/__init__.py000066400000000000000000000004311255571034100176560ustar00rootroot00000000000000""" This module contains functionality for working with automated goal-oriented error control. In particular it offers the following function: compile_with_error_control - Compile forms and generate error control forms """ from .errorcontrol import compile_with_error_control ffc-1.6.0/ffc/errorcontrol/errorcontrol.py000066400000000000000000000126151255571034100206600ustar00rootroot00000000000000""" This module provides compilation of forms required for goal-oriented error control """ # Copyright (C) 2010 Marie E. Rognes # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . from ufl.utils.sorting import sorted_by_key from ufl import Coefficient from ffc.log import info, error from ffc.compiler import compile_form __all__ = ["compile_with_error_control"] def compile_with_error_control(forms, object_names, reserved_objects, prefix, parameters): """ Compile forms and additionally generate and compile forms required for performing goal-oriented error control For linear problems, the input forms should be a bilinear form (a) and a linear form (L) specifying the variational problem and additionally a linear form (M) specifying the goal functional. For nonlinear problems, the input should be linear form (F) and a functional (M) specifying the goal functional. *Arguments* forms (tuple) Three (linear case) or two (nonlinear case) forms specifying the primal problem and the goal object_names (dict) Map from object ids to object names reserved_names (dict) Map from reserved object names to object ids prefix (string) Basename of header file parameters (dict) Parameters for form compilation """ # Check input arguments F, M, u = prepare_input_arguments(forms, object_names, reserved_objects) # Generate forms to be used for the error control from ffc.errorcontrol.errorcontrolgenerators import UFLErrorControlGenerator generator = UFLErrorControlGenerator(F, M, u) ec_forms = generator.generate_all_error_control_forms() # Check that there are no conflicts between user defined and # generated names ec_names = generator.ec_names comment = "%s are reserved error control names." 
% str(sorted(ec_names.values())) assert not (set(object_names.values()) & set(ec_names.values())), \ "Conflict between user defined and generated names: %s" % comment # Add names generated for error control to object_names for (objid, name) in sorted_by_key(ec_names): object_names[objid] = name # Compile error control and input (pde + goal) forms as normal forms = generator.primal_forms() compile_form(ec_forms + forms, object_names, prefix, parameters) return 0 def prepare_input_arguments(forms, object_names, reserved_objects): """ Extract required input arguments to UFLErrorControlGenerator. *Arguments* forms (tuple) Three (linear case) or two (nonlinear case) forms specifying the primal problem and the goal object_names (dict) Map from object ids to object names reserved_names (dict) Map from reserved object names to object ids *Returns* tuple (of length 3) containing Form or tuple A single linear form or a tuple of a bilinear and a linear form Form A linear form or a functional for the goal functional Coefficient The coefficient considered as the unknown """ # Check that we get a tuple of forms expecting_tuple_msg = "Expecting tuple of forms, got %s" % str(forms) assert(isinstance(forms, (list, tuple))), expecting_tuple_msg def __is_nonlinear(forms): return len(forms) == 2 def __is_linear(forms): return len(forms) == 3 # Extract Coefficient labelled as 'unknown' u = reserved_objects.get("unknown", None) if __is_nonlinear(forms): (F, M) = forms # Check that unknown is defined assert (u), "Can't extract 'unknown'. The Coefficient representing the unknown must be labelled by 'unknown' for nonlinear problems." 
# Check that forms have the expected rank assert(len(F.arguments()) == 1) assert(len(M.arguments()) == 0) # Return primal, goal and unknown return (F, M, u) elif __is_linear(forms): # Throw error if unknown is given, don't quite know what to do # with this case yet if u: error("'unknown' defined: not implemented for linear problems") (a, L, M) = forms # Check that forms have the expected rank arguments = a.arguments() assert(len(arguments) == 2) assert(len(L.arguments()) == 1) assert(len(M.arguments()) == 1) # Standard case: create default Coefficient in trial space and # label it __discrete_primal_solution V = arguments[1].element() u = Coefficient(V) object_names[id(u)] = "__discrete_primal_solution" return ((a, L), M, u) else: error("Wrong input tuple length: got %s, expected 2 or 3-tuple" % str(forms)) ffc-1.6.0/ffc/errorcontrol/errorcontrolgenerators.py000066400000000000000000000243721255571034100227550ustar00rootroot00000000000000""" This module provides an abstract ErrorControlGenerator class for generating forms required for goal-oriented error control and a realization of this: UFLErrorControlGenerator for handling pure UFL forms. """ # Copyright (C) 2010 Marie E. Rognes # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
from ufl import inner, dx, ds, dS, avg, replace, action __all__ = ["ErrorControlGenerator", "UFLErrorControlGenerator"] class ErrorControlGenerator: def __init__(self, module, F, M, u): """ *Arguments* module (Python module) The module to use for specific form manipulations (typically ufl or dolfin) F (tuple or Form) tuple of (bilinear, linear) forms or linear form M (Form) functional or linear form u (Coefficient) The coefficient considered as the unknown. """ # Store module self.module = module # Store solution Coefficient/Function self.u = u # Extract the lhs (bilinear form), rhs (linear form), goal # (functional), weak residual (linear form) linear_case = (isinstance(F, (tuple, list)) and len(F) == 2) if linear_case: self.lhs, self.rhs = F try: self.goal = action(M, u) except: self.goal = M # Allow functionals as input as well self.weak_residual = self.rhs - action(self.lhs, u) else: self.lhs = self.module.derivative(F, u) self.rhs = F self.goal = M self.weak_residual = - F # At least check that final forms have correct rank assert(len(self.lhs.arguments()) == 2) assert(len(self.rhs.arguments()) == 1) assert(len(self.goal.arguments()) == 0) assert(len(self.weak_residual.arguments()) == 1) # Get the domain, assuming there's only one assert(len(self.weak_residual.domains()) == 1) self.domain, = self.weak_residual.domains() # Store map from identifiers to names for forms and generated # coefficients self.ec_names = {} # Use predefined names for the forms in the primal problem self.ec_names[id(self.lhs)] = "lhs" self.ec_names[id(self.rhs)] = "rhs" self.ec_names[id(self.goal)] = "goal" # Initialize other required data self.initialize_data() def initialize_data(self): """ Initialize specific data """ msg = """ErrorControlGenerator acts as an abstract class. Subclasses must overload the initialize_data() method and provide a certain set of variables. 
See UFLErrorControlGenerator for an example.""" raise NotImplementedError(msg) def generate_all_error_control_forms(self): """ Utility function for generating all (8) forms required for error control in addition to the primal forms """ # Generate dual forms (a_star, L_star) = self.dual_forms() # Generate forms for computing strong cell residual (a_R_T, L_R_T) = self.cell_residual() # Generate forms for computing strong facet residuals (a_R_dT, L_R_dT) = self.facet_residual() # Generate form for computing error estimate eta_h = self.error_estimate() # Generate form for computing error indicators eta_T = self.error_indicators() # Paranoid checks added after introduction of multidomain features in ufl: for i, form in enumerate((a_star, L_star, eta_h, a_R_T, L_R_T, a_R_dT, L_R_dT, eta_T)): assert len(form.domains()) > 0, ("Zero domains at form %d" % i) assert len(form.domains()) == 1, ("%d domains at form %d" % (len(form.domains()), i)) # Return all generated forms in CERTAIN order matching # constructor of dolfin/adaptivity/ErrorControl.h return (a_star, L_star, eta_h, a_R_T, L_R_T, a_R_dT, L_R_dT, eta_T) def primal_forms(self): """ Return primal forms in order (bilinear, linear, functional) """ return self.lhs, self.rhs, self.goal def dual_forms(self): """ Generate and return (bilinear, linear) forms defining linear dual variational problem """ a_star = self.module.adjoint(self.lhs) L_star = self.module.derivative(self.goal, self.u) return (a_star, L_star) def cell_residual(self): """ Generate and return (bilinear, linear) forms defining linear variational problem for the strong cell residual """ # Define trial and test functions for the cell residuals on # discontinuous version of primal trial space R_T = self.module.TrialFunction(self._dV) v = self.module.TestFunction(self._dV) # Extract original test function in the weak residual v_h = self.weak_residual.arguments()[0] # Define forms defining linear variational problem for cell # residual v_T = self._b_T*v a_R_T = 
inner(v_T, R_T)*dx(self.domain) L_R_T = replace(self.weak_residual, {v_h: v_T}) return (a_R_T, L_R_T) def facet_residual(self): """ Generate and return (bilinear, linear) forms defining linear variational problem for the strong facet residual(s) """ # Define trial and test functions for the facet residuals on # discontinuous version of primal trial space R_e = self.module.TrialFunction(self._dV) v = self.module.TestFunction(self._dV) # Extract original test function in the weak residual v_h = self.weak_residual.arguments()[0] # Define forms defining linear variational problem for facet # residual v_e = self._b_e*v a_R_dT = ((inner(v_e('+'), R_e('+')) + inner(v_e('-'), R_e('-')))*dS(self.domain) + inner(v_e, R_e)*ds(self.domain)) L_R_dT = (replace(self.weak_residual, {v_h: v_e}) - inner(v_e, self._R_T)*dx(self.domain)) return (a_R_dT, L_R_dT) def error_estimate(self): """ Generate and return functional defining error estimate """ # Error estimate defined as r(Ez_h): eta_h = action(self.weak_residual, self._Ez_h) return eta_h def error_indicators(self): """ Generate and return linear form defining error indicators """ # Extract these to increase readability R_T = self._R_T R_dT = self._R_dT z = self._Ez_h z_h = self._z_h # Define linear form for computing error indicators v = self.module.TestFunction(self._DG0) eta_T = (v*inner(R_T, z - z_h)*dx(self.domain) + avg(v)*(inner(R_dT('+'), (z - z_h)('+')) + inner(R_dT('-'), (z - z_h)('-')))*dS(self.domain) + v*inner(R_dT, z - z_h)*ds(self.domain)) return eta_T class UFLErrorControlGenerator(ErrorControlGenerator): """ This class provides a realization of ErrorControlGenerator for use with pure UFL forms """ def __init__(self, F, M, u): """ *Arguments* F (tuple or Form) tuple of (bilinear, linear) forms or linear form M (Form) functional or linear form u (Coefficient) The coefficient considered as the unknown. 
""" ErrorControlGenerator.__init__(self, __import__("ufl"), F, M, u) def initialize_data(self): """ Extract required objects for defining error control forms. This will be stored, reused and in particular named. """ # Developer's note: The UFL-FFC-DOLFIN--PyDOLFIN toolchain for # error control is quite fine-tuned. In particular, the order # of coefficients in forms is (and almost must be) used for # their assignment. This means that the order in which these # coefficients are defined matters and should be considered # fixed. from ufl import FiniteElement, Coefficient from ufl.algorithms.elementtransformations import tear, increase_order # Primal trial element space self._V = self.u.element() # Primal test space == Dual trial space Vhat = self.weak_residual.arguments()[0].element() # Discontinuous version of primal trial element space self._dV = tear(self._V) # Extract domain and geometric dimension domain, = self._V.domains() gdim = domain.geometric_dimension() # Coefficient representing improved dual E = increase_order(Vhat) self._Ez_h = Coefficient(E) self.ec_names[id(self._Ez_h)] = "__improved_dual" # Coefficient representing cell bubble function B = FiniteElement("B", domain, gdim + 1) self._b_T = Coefficient(B) self.ec_names[id(self._b_T)] = "__cell_bubble" # Coefficient representing strong cell residual self._R_T = Coefficient(self._dV) self.ec_names[id(self._R_T)] = "__cell_residual" # Coefficient representing cell cone function C = FiniteElement("DG", domain, gdim) self._b_e = Coefficient(C) self.ec_names[id(self._b_e)] = "__cell_cone" # Coefficient representing strong facet residual self._R_dT = Coefficient(self._dV) self.ec_names[id(self._R_dT)] = "__facet_residual" # Define discrete dual on primal test space self._z_h = Coefficient(Vhat) self.ec_names[id(self._z_h)] = "__discrete_dual_solution" # Piecewise constants for assembling indicators self._DG0 = FiniteElement("DG", domain, 0) 
ffc-1.6.0/ffc/evaluatebasis.py000066400000000000000000001016571255571034100162310ustar00rootroot00000000000000"""Code generation for evaluation of finite element basis values. This module generates code which is more or less a C++ representation of the code found in FIAT.""" # Copyright (C) 2007-2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2007-04-04 # Last changed: 2015-03-28 # # Modified by Marie E. Rognes 2011 # Modified by Anders Logg 2013 # Modified by Lizao Li 2015 # # MER: The original module generated code that was more or less a C++ # representation of the code found in FIAT. I've modified this (for 2 # and 3D) to generate code that does the same as FIAT, but with loops # unrolled etc, thus removing unnecessary computations at runtime. # There might be some clean-ups required, specially after this. # Python modules import math import numpy # FFC modules from ffc.log import error from ffc.cpp import remove_unused, indent, format from ffc.quadrature.symbolics import create_float, create_float, create_symbol,\ create_product, create_sum, create_fraction, CONST def _evaluate_basis_all(data): """Like evaluate_basis, but return the values of all basis functions (dofs).""" if isinstance(data, str): return format["exception"]("evaluate_basis_all: %s" % data) # Prefetch formats. 
f_assign = format["assign"] f_component = format["component"] f_comment = format["comment"] f_loop = format["generate loop"] f_r, f_s = format["free indices"][:2] f_tensor = format["tabulate tensor"] f_values = format["argument values"] f_basis = format["call basis"] f_dof_vals = format["dof values"] f_double = format["float declaration"] f_float = format["floating point"] f_decl = format["declaration"] f_ref_var = format["reference variable"] # Initialise return code. code = [] # FIXME: KBO: Figure out how the return format should be, either: # [N0[0], N0[1], N1[0], N1[1], ...] # or # [N0[0], N1[0], ..., N0[1], N1[1], ...] # for vector (tensor elements), currently returning option 1. # FIXME: KBO: For now, just call evaluate_basis and map values accordingly, # this will keep the amount of code at a minimum. If it turns out that speed # is an issue (overhead from calling evaluate_basis), we can easily generate # all the code # Get total value shape and space dimension for entire element (possibly mixed). physical_value_size = data["physical_value_size"] space_dimension = data["space_dimension"] # Special case where space dimension is one (constant elements). if space_dimension == 1: code += [f_comment("Element is constant, calling evaluate_basis.")] code += [f_basis(format["int"](0), f_values)] return "\n".join(code) # Declare helper value to hold single dof values. code += [f_comment("Helper variable to hold values of a single dof.")] if physical_value_size == 1: code += [f_decl(f_double, f_dof_vals, f_float(0.0))] else: code += [f_decl(f_double, f_component(f_dof_vals, physical_value_size), f_tensor([0.0]*physical_value_size))] # Create loop over dofs that calls evaluate_basis for a single dof and # inserts the values into the global array. 
code += ["", f_comment("Loop dofs and call evaluate_basis")] lines_r = [] loop_vars_r = [(f_r, 0, space_dimension)] if physical_value_size == 1: lines_r += [f_basis(f_r, f_ref_var(f_dof_vals))] else: lines_r += [f_basis(f_r, f_dof_vals)] if physical_value_size == 1: lines_r += [f_assign(f_component(f_values, f_r), f_dof_vals)] else: index = format["matrix index"](f_r, f_s, physical_value_size) lines_s = [f_assign(f_component(f_values, index), f_component(f_dof_vals, f_s))] lines_r += f_loop(lines_s, [(f_s, 0, physical_value_size)]) code += f_loop(lines_r, loop_vars_r) # Generate code (no need to remove unused). return "\n".join(code) # From FIAT_NEW.polynomial_set.tabulate() def _evaluate_basis(data): """Generate run time code to evaluate an element basisfunction at an arbitrary point. The value(s) of the basisfunction is/are computed as in FIAT as the dot product of the coefficients (computed at compile time) and basisvalues which are dependent on the coordinate and thus have to be computed at run time. The function should work for all elements supported by FIAT, but it remains untested for tensor valued elements.""" if isinstance(data, str): return format["exception"]("evaluate_basis: %s" % data) # Prefetch formats. f_assign = format["assign"] f_comment = format["comment"] f_values = format["argument values"] f_float = format["floating point"] f_component = format["component"] # Initialise return code. code = [] # Get the element cell name and geometric dimension. element_cellname = data["cellname"] gdim = data["geometric_dimension"] tdim = data["topological_dimension"] # Get code snippets for Jacobian, Inverse of Jacobian and mapping of # coordinates from physical element to the FIAT reference element. 
code += [format["compute_jacobian"](tdim, gdim)] code += [format["compute_jacobian_inverse"](tdim, gdim)] if data["needs_oriented"]: code += [format["orientation"](tdim, gdim)] code += ["", format["fiat coordinate map"](element_cellname, gdim)] # Get value shape and reset values. This should also work for TensorElement, # scalar are empty tuples, therefore (1,) in which case value_shape = 1. reference_value_size = data["reference_value_size"] code += ["", f_comment("Reset values")] if reference_value_size == 1: # Reset values as a pointer. code += [f_assign(format["dereference pointer"](f_values), f_float(0.0))] else: # Reset all values. code += [f_assign(f_component(f_values, i), f_float(0.0)) for i in range(reference_value_size)] # Create code for all basis values (dofs). dof_cases = [] for dof in data["dof_data"]: dof_cases.append(_generate_dof_code(data, dof)) code += [format["switch"](format["argument basis num"], dof_cases)] # Remove unused variables (from transformations and mappings) in code. code = remove_unused("\n".join(code)) #code = "\n".join(code) return code def _generate_dof_code(data, dof_data): """Generate code for a single basis element as the dot product of coefficients and basisvalues. Then apply transformation if applicable.""" # Generate basisvalues. code = _compute_basisvalues(data, dof_data) # Tabulate coefficients. code += _tabulate_coefficients(dof_data) # Compute the value of the basisfunction as the dot product of the # coefficients and basisvalues and apply transformation. code += _compute_values(data, dof_data) return remove_unused("\n".join(code)) def _tabulate_coefficients(dof_data): """This function tabulates the element coefficients that are generated by FIAT at compile time.""" # Prefetch formats to speed up code generation. 
f_comment = format["comment"] f_table = format["static const float declaration"] f_coefficients = format["coefficients"] f_component = format["component"] f_decl = format["declaration"] f_tensor = format["tabulate tensor"] f_new_line = format["new line"] # Get coefficients from basis functions, computed by FIAT at compile time. coefficients = dof_data["coeffs"] # Initialise return code. code = [f_comment("Table(s) of coefficients")] # Get number of members of the expansion set. num_mem = dof_data["num_expansion_members"] # Generate tables for each component. for i, coeffs in enumerate(coefficients): # Varable name for coefficients. name = f_component(f_coefficients(i), num_mem) # Generate array of values. code += [f_decl(f_table, name, f_new_line + f_tensor(coeffs))] + [""] return code def _compute_values(data, dof_data): """This function computes the value of the basisfunction as the dot product of the coefficients and basisvalues.""" # Prefetch formats to speed up code generation. f_values = format["argument values"] f_component = format["component"] f_comment = format["comment"] f_add = format["add"] f_coefficients = format["coefficients"] f_basisvalues = format["basisvalues"] f_r = format["free indices"][0] # f_dof = format["local dof"] f_deref_pointer = format["dereference pointer"] f_detJ = format["det(J)"] f_inv = format["inverse"] f_mul = format["mul"] f_iadd = format["iadd"] f_group = format["grouping"] f_tmp_ref = format["tmp ref value"] f_assign = format["assign"] f_loop = format["generate loop"] f_const_float = format["const float declaration"] f_trans = format["transform"] f_inner = format["inner product"] tdim = data["topological_dimension"] gdim = data["geometric_dimension"] # Initialise return code. code = [f_comment("Compute value(s)")] # Get dof data. num_components = dof_data["num_components"] offset = dof_data["offset"] lines = [] if data["reference_value_size"] != 1: # Loop number of components. 
for i in range(num_components): # Generate name and value to create matrix vector multiply. name = f_component(f_values, i + offset) value = f_mul([f_component(f_coefficients(i), f_r),\ f_component(f_basisvalues, f_r)]) lines += [f_iadd(name, value)] else: # Generate name and value to create matrix vector multiply. name = f_deref_pointer(f_values) value = f_mul([f_component(f_coefficients(0), f_r),\ f_component(f_basisvalues, f_r)]) lines = [f_iadd(name, value)] # Get number of members of the expansion set. num_mem = dof_data["num_expansion_members"] loop_vars = [(f_r, 0, num_mem)] code += f_loop(lines, loop_vars) # Apply transformation if applicable. mapping = dof_data["mapping"] if mapping == "affine": pass elif mapping == "contravariant piola": code += ["", f_comment("Using contravariant Piola transform to map values back to the physical element")] # Get temporary values before mapping. code += [f_const_float(f_tmp_ref(i), f_component(f_values, i + offset))\ for i in range(num_components)] # Create names for inner product. basis_col = [f_tmp_ref(j) for j in range(tdim)] for i in range(gdim): # Create Jacobian. jacobian_row = [f_trans("J", i, j, gdim, tdim, None) for j in range(tdim)] # Create inner product and multiply by inverse of Jacobian. inner = f_group(f_inner(jacobian_row, basis_col)) value = f_mul([f_inv(f_detJ(None)), inner]) name = f_component(f_values, i + offset) code += [f_assign(name, value)] elif mapping == "covariant piola": code += ["", f_comment("Using covariant Piola transform to map values back to the physical element")] # Get temporary values before mapping. code += [f_const_float(f_tmp_ref(i), f_component(f_values, i + offset))\ for i in range(num_components)] # Create names for inner product. tdim = data["topological_dimension"] gdim = data["geometric_dimension"] basis_col = [f_tmp_ref(j) for j in range(tdim)] for i in range(gdim): # Create inverse of Jacobian. 
inv_jacobian_column = [f_trans("JINV", j, i, tdim, gdim, None) for j in range(tdim)] # Create inner product of basis values and inverse of Jacobian. value = f_group(f_inner(inv_jacobian_column, basis_col)) name = f_component(f_values, i + offset) code += [f_assign(name, value)] elif mapping == "pullback as metric": code += ["", f_comment("Using metric pullback to map values back to the physical element")] # Get temporary values before mapping. code += [f_const_float(f_tmp_ref(i), f_component(f_values, i + offset))\ for i in range(num_components)] # Create names for inner product. tdim = data["topological_dimension"] gdim = data["geometric_dimension"] basis_col = [f_tmp_ref(j) for j in range(num_components)] for p in range(num_components): # unflatten the indices i = p // tdim l = p % tdim # g_il = K_ji G_jk K_kl value = f_group(f_inner( [f_inner([f_trans("JINV", j, i, tdim, gdim, None) for j in range(tdim)], [basis_col[j * tdim + k] for j in range(tdim)]) for k in range(tdim)], [f_trans("JINV", k, l, tdim, gdim, None) for k in range(tdim)])) name = f_component(f_values, p + offset) code += [f_assign(name, value)] else: error("Unknown mapping: %s" % mapping) return code def _compute_basisvalues(data, dof_data): """From FIAT_NEW.expansions.""" UNROLL = True # Prefetch formats to speed up code generation. 
f_comment = format["comment"] f_add = format["add"] f_mul = format["mul"] f_imul = format["imul"] f_sub = format["sub"] f_group = format["grouping"] f_assign = format["assign"] f_sqrt = format["sqrt"] f_x = format["x coordinate"] f_y = format["y coordinate"] f_z = format["z coordinate"] f_double = format["float declaration"] f_basisvalue = format["basisvalues"] f_component = format["component"] f_float = format["floating point"] f_uint = format["uint declaration"] f_tensor = format["tabulate tensor"] f_loop = format["generate loop"] f_decl = format["declaration"] f_tmp = format["tmp value"] f_int = format["int"] f_r, f_s, f_t = format["free indices"][:3] idx0 = f_r + f_r idx1 = f_s + f_s idx2 = f_t + f_t # Create temporary values. f1, f2, f3, f4, f5 = [create_symbol(f_tmp(i), CONST) for i in range(0,5)] an, bn, cn = [create_symbol(f_tmp(i), CONST) for i in range(5,8)] # Get embedded degree. embedded_degree = dof_data["embedded_degree"] # Create helper symbols. symbol_p = create_symbol(f_r, CONST) symbol_q = create_symbol(f_s, CONST) symbol_r = create_symbol(f_t, CONST) symbol_x = create_symbol(f_x, CONST) symbol_y = create_symbol(f_y, CONST) symbol_z = create_symbol(f_z, CONST) basis_idx0 = create_symbol(f_component(f_basisvalue, idx0), CONST) basis_idx1 = create_symbol(f_component(f_basisvalue, idx1), CONST) basis_idx2 = create_symbol(f_component(f_basisvalue, idx2), CONST) int_0 = f_int(0) int_1 = f_int(1) int_2 = f_int(2) int_n = f_int(embedded_degree) int_n1 = f_int(embedded_degree + 1) int_nm1 = f_int(embedded_degree - 1) float_0 = create_float(0) float_1 = create_float(1) float_2 = create_float(2) float_3 = create_float(3) float_4 = create_float(4) float_1_5 = create_float(1.5) float_0_5 = create_float(0.5) float_0_25 = create_float(0.25) # Initialise return code. code = [""] # Create zero array for basisvalues. # Get number of members of the expansion set. 
num_mem = dof_data["num_expansion_members"] code += [f_comment("Array of basisvalues")] code += [f_decl(f_double, f_component(f_basisvalue, num_mem), f_tensor([0.0]*num_mem))] # Declare helper variables, will be removed if not used. code += ["", f_comment("Declare helper variables")] code += [f_decl(f_uint, idx0, int_0)] code += [f_decl(f_uint, idx1, int_0)] code += [f_decl(f_uint, idx2, int_0)] code += [f_decl(f_double, str(an), f_float(0))] code += [f_decl(f_double, str(bn), f_float(0))] code += [f_decl(f_double, str(cn), f_float(0))] # Get the element cell name element_cellname = data["cellname"] def _jrc(a, b, n): an = float( ( 2*n+1+a+b)*(2*n+2+a+b))/ float( 2*(n+1)*(n+1+a+b)) bn = float( (a*a-b*b) * (2*n+1+a+b))/ float( 2*(n+1)*(2*n+a+b)*(n+1+a+b) ) cn = float( (n+a)*(n+b)*(2*n+2+a+b))/ float( (n+1)*(n+1+a+b)*(2*n+a+b) ) return (an,bn,cn) # 1D if (element_cellname == "interval"): # FIAT_NEW.expansions.LineExpansionSet. # FIAT_NEW code # psitilde_as = jacobi.eval_jacobi_batch(0,0,n,ref_pts) # FIAT_NEW.jacobi.eval_jacobi_batch(a,b,n,xs) # The initial value basisvalue 0 is always 1.0 # FIAT_NEW code # for ii in range(result.shape[1]): # result[0,ii] = 1.0 + xs[ii,0] - xs[ii,0] code += ["", f_comment("Compute basisvalues")] code += [f_assign(f_component(f_basisvalue, 0), f_float(1.0))] # Only continue if the embedded degree is larger than zero. if embedded_degree > 0: # FIAT_NEW.jacobi.eval_jacobi_batch(a,b,n,xs). # result[1,:] = 0.5 * ( a - b + ( a + b + 2.0 ) * xsnew ) # The initial value basisvalue 1 is always x code += [f_assign(f_component(f_basisvalue, 1), f_x)] # Only active is embedded_degree > 1. if embedded_degree > 1: # FIAT_NEW.jacobi.eval_jacobi_batch(a,b,n,xs). 
# apb = a + b (equal to 0 because of function arguments) # for k in range(2,n+1): # a1 = 2.0 * k * ( k + apb ) * ( 2.0 * k + apb - 2.0 ) # a2 = ( 2.0 * k + apb - 1.0 ) * ( a * a - b * b ) # a3 = ( 2.0 * k + apb - 2.0 ) \ # * ( 2.0 * k + apb - 1.0 ) \ # * ( 2.0 * k + apb ) # a4 = 2.0 * ( k + a - 1.0 ) * ( k + b - 1.0 ) \ # * ( 2.0 * k + apb ) # a2 = a2 / a1 # a3 = a3 / a1 # a4 = a4 / a1 # result[k,:] = ( a2 + a3 * xsnew ) * result[k-1,:] \ # - a4 * result[k-2,:] # The below implements the above (with a = b = apb = 0) for r in range(2, embedded_degree+1): # Define helper variables a1 = 2.0*r*r*(2.0*r - 2.0) a3 = ((2.0*r - 2.0)*(2.0*r - 1.0 )*(2.0*r))/a1 a4 = (2.0*(r - 1.0)*(r - 1.0)*(2.0*r))/a1 assign_to = f_component(f_basisvalue, r) assign_from = f_sub([f_mul([f_x, f_component(f_basisvalue, r-1), f_float(a3)]), f_mul([f_component(f_basisvalue, r-2), f_float(a4)])]) code += [f_assign(assign_to, assign_from)] # Scale values. # FIAT_NEW.expansions.LineExpansionSet. # FIAT_NEW code # results = numpy.zeros( ( n+1 , len(pts) ) , type( pts[0][0] ) ) # for k in range( n + 1 ): # results[k,:] = psitilde_as[k,:] * math.sqrt( k + 0.5 ) lines = [] loop_vars = [(str(symbol_p), 0, int_n1)] # Create names. basis_k = create_symbol(f_component(f_basisvalue, str(symbol_p)), CONST) # Compute value. fac1 = create_symbol( f_sqrt(str(symbol_p + float_0_5)), CONST ) lines += [format["imul"](str(basis_k), str(fac1))] # Create loop (block of lines). code += f_loop(lines, loop_vars) # 2D elif (element_cellname == "triangle"): # FIAT_NEW.expansions.TriangleExpansionSet. 
# Compute helper factors # FIAT_NEW code # f1 = (1.0+2*x+y)/2.0 # f2 = (1.0 - y) / 2.0 # f3 = f2**2 fac1 = create_fraction(float_1 + float_2*symbol_x + symbol_y, float_2) fac2 = create_fraction(float_1 - symbol_y, float_2) code += [f_decl(f_double, str(f1), fac1)] code += [f_decl(f_double, str(f2), fac2)] code += [f_decl(f_double, str(f3), f2*f2)] code += ["", f_comment("Compute basisvalues")] # The initial value basisvalue 0 is always 1.0. # FIAT_NEW code # for ii in range( results.shape[1] ): # results[0,ii] = 1.0 + apts[ii,0]-apts[ii,0]+apts[ii,1]-apts[ii,1] code += [f_assign(f_component(f_basisvalue, 0), f_float(1.0))] def _idx2d(p, q): return (p + q)*(p + q + 1)//2 + q # Only continue if the embedded degree is larger than zero. if embedded_degree > 0: # The initial value of basisfunction 1 is equal to f1. # FIAT_NEW code # results[idx(1,0),:] = f1 code += [f_assign(f_component(f_basisvalue, 1), str(f1))] # NOTE: KBO: The order of the loops is VERY IMPORTANT!! # Only active is embedded_degree > 1. if embedded_degree > 1: # FIAT_NEW code (loop 1 in FIAT) # for p in range(1,n): # a = (2.0*p+1)/(1.0+p) # b = p / (p+1.0) # results[idx(p+1,0)] = a * f1 * results[idx(p,0),:] \ # - p/(1.0+p) * f3 *results[idx(p-1,0),:] # FIXME: KBO: Is there an error in FIAT? why is b not used? for r in range(1, embedded_degree): rr = _idx2d((r + 1), 0) assign_to = f_component(f_basisvalue, rr) ss = _idx2d(r, 0) tt = _idx2d((r - 1), 0) A = (2*r + 1.0)/(r + 1) B = r/(1.0 + r) v1 = f_mul([f_component(f_basisvalue, ss), f_float(A), str(f1)]) v2 = f_mul([f_component(f_basisvalue, tt), f_float(B), str(f3)]) assign_from = f_sub([v1, v2]) code += [f_assign(assign_to, assign_from)] # FIAT_NEW code (loop 2 in FIAT). 
# for p in range(n): # results[idx(p,1),:] = 0.5 * (1+2.0*p+(3.0+2.0*p)*y) \ # * results[idx(p,0)] for r in range(0, embedded_degree): # (p+q)*(p+q+1)//2 + q rr = _idx2d(r, 1) assign_to = f_component(f_basisvalue, rr) ss = _idx2d(r, 0) A = 0.5*(1 + 2*r) B = 0.5*(3 + 2*r) C = f_add([f_float(A), f_mul([f_float(B), str(symbol_y)])]) assign_from = f_mul([f_component(f_basisvalue, ss), f_group(C)]) code += [f_assign(assign_to, assign_from)] # Only active is embedded_degree > 1. if embedded_degree > 1: # FIAT_NEW code (loop 3 in FIAT). # for p in range(n-1): # for q in range(1,n-p): # (a1,a2,a3) = jrc(2*p+1,0,q) # results[idx(p,q+1),:] \ # = ( a1 * y + a2 ) * results[idx(p,q)] \ # - a3 * results[idx(p,q-1)] for r in range(0, embedded_degree - 1): for s in range(1, embedded_degree - r): rr = _idx2d(r, (s + 1)) ss = _idx2d(r, s) tt = _idx2d(r, s - 1) A, B, C = _jrc(2*r + 1, 0, s) assign_to = f_component(f_basisvalue, rr) assign_from = f_sub([f_mul([f_component(f_basisvalue, ss), f_group(f_add([f_float(B), f_mul([str(symbol_y), f_float(A)])]))]), f_mul([f_component(f_basisvalue, tt), f_float(C)])]) code += [f_assign(assign_to, assign_from)] # FIAT_NEW code (loop 4 in FIAT). # for p in range(n+1): # for q in range(n-p+1): # results[idx(p,q),:] *= math.sqrt((p+0.5)*(p+q+1.0)) n1 = embedded_degree + 1 for r in range(0, n1): for s in range(0, n1 - r): rr = _idx2d(r, s) A = (r + 0.5)*(r + s + 1) assign_to = f_component(f_basisvalue, rr) code += [f_imul(assign_to, f_sqrt(A))] # 3D elif (element_cellname == "tetrahedron"): # FIAT_NEW code (compute index function) TetrahedronExpansionSet. # def idx(p,q,r): # return (p+q+r)*(p+q+r+1)*(p+q+r+2)//6 + (q+r)*(q+r+1)//2 + r def _idx3d(p, q, r): return (p+q+r)*(p+q+r+1)*(p+q+r+2)//6 + (q+r)*(q+r+1)//2 + r # FIAT_NEW.expansions.TetrahedronExpansionSet. # Compute helper factors. 
# FIAT_NEW code # factor1 = 0.5 * ( 2.0 + 2.0*x + y + z ) # factor2 = (0.5*(y+z))**2 # factor3 = 0.5 * ( 1 + 2.0 * y + z ) # factor4 = 0.5 * ( 1 - z ) # factor5 = factor4 ** 2 fac1 = create_product([float_0_5, float_2 + float_2*symbol_x + symbol_y + symbol_z]) fac2 = create_product([float_0_25, symbol_y + symbol_z, symbol_y + symbol_z]) fac3 = create_product([float_0_5, float_1 + float_2*symbol_y + symbol_z]) fac4 = create_product([float_0_5, float_1 - symbol_z]) code += [f_decl(f_double, str(f1), fac1)] code += [f_decl(f_double, str(f2), fac2)] code += [f_decl(f_double, str(f3), fac3)] code += [f_decl(f_double, str(f4), fac4)] code += [f_decl(f_double, str(f5), f4*f4)] code += ["", f_comment("Compute basisvalues")] # The initial value basisvalue 0 is always 1.0. # FIAT_NEW code # for ii in range( results.shape[1] ): # results[0,ii] = 1.0 + apts[ii,0]-apts[ii,0]+apts[ii,1]-apts[ii,1] code += [f_assign(f_component(f_basisvalue, 0), f_float(1.0))] # Only continue if the embedded degree is larger than zero. if embedded_degree > 0: # The initial value of basisfunction 1 is equal to f1. # FIAT_NEW code # results[idx(1,0),:] = f1 code += [f_assign(f_component(f_basisvalue, 1), str(f1))] # NOTE: KBO: The order of the loops is VERY IMPORTANT!! # Only active is embedded_degree > 1 if embedded_degree > 1: # FIAT_NEW code (loop 1 in FIAT). # for p in range(1,n): # a1 = ( 2.0 * p + 1.0 ) / ( p + 1.0 ) # a2 = p / (p + 1.0) # results[idx(p+1,0,0)] = a1 * factor1 * results[idx(p,0,0)] \ # -a2 * factor2 * results[ idx(p-1,0,0) ] for r in range(1, embedded_degree): rr = _idx3d((r + 1), 0, 0) ss = _idx3d(r, 0, 0) tt = _idx3d((r - 1), 0, 0) A = (2*r + 1.0)/(r + 1) B = r/(r + 1.0) assign_to = f_component(f_basisvalue, rr) assign_from = f_sub([f_mul([f_float(A), str(f1), f_component(f_basisvalue, ss)]), f_mul([f_float(B), str(f2), f_component(f_basisvalue, tt)])]) code += [f_assign(assign_to, assign_from)] # FIAT_NEW code (loop 2 in FIAT). 
# q = 1 # for p in range(0,n): # results[idx(p,1,0)] = results[idx(p,0,0)] \ # * ( p * (1.0 + y) + ( 2.0 + 3.0 * y + z ) / 2 ) for r in range(0, embedded_degree): rr = _idx3d(r, 1, 0) ss = _idx3d(r, 0, 0) assign_to = f_component(f_basisvalue, rr) term0 = f_mul([f_float(0.5), f_group(f_add([f_float(2), f_mul([f_float(3), str(symbol_y)]), str(symbol_z)]))]) if r == 0: assign_from = f_mul([term0, f_component(f_basisvalue, ss)]) else: term1 = f_mul([f_float(r), f_group(f_add([f_float(1), str(symbol_y)]))]) assign_from = f_mul([f_group(f_add([term0, term1])), f_component(f_basisvalue, ss)]) code += [f_assign(assign_to, assign_from)] # Only active is embedded_degree > 1. if embedded_degree > 1: # FIAT_NEW code (loop 3 in FIAT). # for p in range(0,n-1): # for q in range(1,n-p): # (aq,bq,cq) = jrc(2*p+1,0,q) # qmcoeff = aq * factor3 + bq * factor4 # qm1coeff = cq * factor5 # results[idx(p,q+1,0)] = qmcoeff * results[idx(p,q,0)] \ # - qm1coeff * results[idx(p,q-1,0)] for r in range(0, embedded_degree - 1): for s in range(1, embedded_degree - r): rr = _idx3d(r, (s + 1), 0) ss = _idx3d(r, s, 0) tt = _idx3d(r, s - 1, 0) (A, B, C) = _jrc(2*r + 1, 0, s) assign_to = f_component(f_basisvalue, rr) term0 = f_mul([f_group(f_add([f_mul([f_float(A), str(f3)]), f_mul([f_float(B), str(f4)])])), f_component(f_basisvalue, ss)]) term1 = f_mul([f_float(C), str(f5), f_component(f_basisvalue, tt)]) assign_from = f_sub([term0, term1]) code += [f_assign(assign_to, assign_from)] # FIAT_NEW code (loop 4 in FIAT). 
# now handle r=1 # for p in range(n): # for q in range(n-p): # results[idx(p,q,1)] = results[idx(p,q,0)] \ # * ( 1.0 + p + q + ( 2.0 + q + p ) * z ) for r in range(0, embedded_degree): for s in range(0, embedded_degree - r): rr = _idx3d(r, s, 1) ss = _idx3d(r, s, 0) assign_to = f_component(f_basisvalue, rr) A = f_add([f_mul([f_float(2 + r + s), str(symbol_z)]), f_float(1 + r + s)]) assign_from = f_mul([f_group(A), f_component(f_basisvalue, ss)]) code += [f_assign(assign_to, assign_from)] # Only active is embedded_degree > 1. if embedded_degree > 1: # FIAT_NEW code (loop 5 in FIAT). # general r by recurrence # for p in range(n-1): # for q in range(0,n-p-1): # for r in range(1,n-p-q): # ar,br,cr = jrc(2*p+2*q+2,0,r) # results[idx(p,q,r+1)] = \ # (ar * z + br) * results[idx(p,q,r) ] \ # - cr * results[idx(p,q,r-1) ] for r in range(embedded_degree - 1): for s in range(0, embedded_degree - r - 1): for t in range(1, embedded_degree - r - s): rr = _idx3d(r, s, ( t + 1)) ss = _idx3d(r, s, t) tt = _idx3d(r, s, t - 1) (A, B, C) = _jrc(2*r + 2*s + 2, 0, t) assign_to = f_component(f_basisvalue, rr) az_b = f_group(f_add([f_float(B), f_mul([f_float(A), str(symbol_z)])])) assign_from = f_sub([f_mul([f_component(f_basisvalue, ss), az_b]), f_mul([f_float(C), f_component(f_basisvalue, tt)])]) code += [f_assign(assign_to, assign_from)] # FIAT_NEW code (loop 6 in FIAT). 
# for p in range(n+1): # for q in range(n-p+1): # for r in range(n-p-q+1): # results[idx(p,q,r)] *= math.sqrt((p+0.5)*(p+q+1.0)*(p+q+r+1.5)) for r in range(embedded_degree + 1): for s in range(embedded_degree - r + 1): for t in range(embedded_degree - r - s + 1): rr = _idx3d(r, s, t) A = (r + 0.5)*(r + s + 1)*(r + s + t + 1.5) assign_to = f_component(f_basisvalue, rr) multiply_by = f_sqrt(A) myline = f_imul(assign_to, multiply_by) code += [myline] else: error("Cannot compute basis values for shape: %d" % elemet_cell_domain) return code + [""] ffc-1.6.0/ffc/evaluatebasisderivatives.py000066400000000000000000000715131255571034100204740ustar00rootroot00000000000000"""Code generation for evaluation of derivatives of finite element basis values. This module generates code which is more or less a C++ representation of the code found in FIAT_NEW.""" # Copyright (C) 2007-2013 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
# # Modified by Anders Logg 2013 # Modified by Lizao Li 2015 # # First added: 2007-04-16 # Last changed: 2015-03-28 # Python modules import math import numpy # FFC modules from ffc.log import error, ffc_assert from ffc.evaluatebasis import _compute_basisvalues, _tabulate_coefficients from ffc.cpp import remove_unused, indent, format def _evaluate_basis_derivatives_all(data): """Like evaluate_basis, but return the values of all basis functions (dofs).""" if isinstance(data, str): return format["exception"]("evaluate_basis_derivatives_all: %s" % data) # Prefetch formats. f_r, f_s = format["free indices"][:2] f_assign = format["assign"] f_loop = format["generate loop"] f_array = format["dynamic array"] f_dof_vals = format["dof values"] f_comment = format["comment"] f_derivs = format["call basis_derivatives"] f_values = format["argument values"] f_int = format["int"] f_num_derivs = format["num derivatives"] f_double = format["float declaration"] f_component = format["component"] f_mul = format["mul"] f_float = format["floating point"] f_index = format["matrix index"] f_del_array = format["delete dynamic array"] # Initialise return code code = [] # FIXME: KBO: Figure out which return format to use, either: # [dN0[0]/dx, dN0[0]/dy, dN0[1]/dx, dN0[1]/dy, dN1[0]/dx, # dN1[0]/dy, dN1[1]/dx, dN1[1]/dy, ...] # or # [dN0[0]/dx, dN1[0]/dx, ..., dN0[1]/dx, dN1[1]/dx, ..., # dN0[0]/dy, dN1[0]/dy, ..., dN0[1]/dy, dN1[1]/dy, ...] # or # [dN0[0]/dx, dN0[1]/dx, ..., dN1[0]/dx, dN1[1]/dx, ..., # dN0[0]/dy, dN0[1]/dy, ..., dN1[0]/dy, dN1[1]/dy, ...] # for vector (tensor elements), currently returning option 1. # FIXME: KBO: For now, just call evaluate_basis_derivatives and # map values accordingly, this will keep the amount of code at a # minimum. If it turns out that speed is an issue (overhead from # calling evaluate_basis), we can easily generate all the code. # Get total value shape and space dimension for entire element # (possibly mixed). 
physical_value_size = data["physical_value_size"] space_dimension = data["space_dimension"] max_degree = data["max_degree"] # Special case where space dimension is one (constant elements). if space_dimension == 1: code += [f_comment("Element is constant, calling evaluate_basis_derivatives.")] code += [f_derivs(f_int(0), f_values)] return "\n".join(code) # Compute number of derivatives. if data["topological_dimension"]==data["geometric_dimension"]: _g = "" else: _g = "_g" # If n == 0, call evaluate_basis. code += [format["comment"]("Call evaluate_basis_all if order of derivatives is equal to zero.")] cond = format["argument derivative order"] + format["is equal"] + format["int"](0) val = [format["call basis_all"]] val += [format["return"]("")] code += [format["if"](cond, indent("\n".join(val),2))] code += _compute_num_derivatives(data["geometric_dimension"], _g) if (physical_value_size == 1): num_vals = f_num_derivs(_g) else: num_vals = f_mul([f_int(physical_value_size), f_num_derivs(_g)]) # Reset values. code += ["", f_comment("Set values equal to zero.")] name = f_component(f_values, f_index(f_r, f_s, num_vals)) lines_s = [f_assign(name, f_float(0.0))] loop_s = [(f_s, 0, num_vals)] lines_r = f_loop(lines_s, loop_s) loop_r = [(f_r, 0, space_dimension)] code += f_loop(lines_r, loop_r) # If n > max_degree, return zeros. code += ["", format["comment"]("If order of derivatives is greater than the maximum polynomial degree, return zeros.")] cond = format["argument derivative order"] + format["greater than"] + f_int(max_degree) val = format["return"]("") code += [format["if"](cond, indent(val,2))] # Declare helper value to hold single dof values and reset. 
code += [f_comment("Helper variable to hold values of a single dof.")] nds = data["geometric_dimension"]**max_degree*physical_value_size code += [format["declaration"](f_double, f_component(f_dof_vals, f_int(nds)))] line = [f_assign(f_component(f_dof_vals, f_r), f_float(0.0))] code += f_loop(line, [(f_r, 0, nds)]) # Create loop over dofs that calls evaluate_basis_derivatives for a single dof and # inserts the values into the global array. code += ["", f_comment("Loop dofs and call evaluate_basis_derivatives.")] name = f_component(f_values, f_index(f_r, f_s, num_vals)) value = f_component(f_dof_vals, f_s) lines_s = [f_assign(name, value)] loop_s = [(f_s, 0, num_vals)] lines_r = [f_derivs(f_r, f_dof_vals)] lines_r += f_loop(lines_s, loop_s) loop_r = [(f_r, 0, space_dimension)] code += f_loop(lines_r, loop_r) # Generate bode (no need to remove unused). return "\n".join(code) def _evaluate_basis_derivatives(data): """Evaluate the derivatives of an element basisfunction at a point. The values are computed as in FIAT as the matrix product of the coefficients (computed at compile time), basisvalues which are dependent on the coordinate and thus have to be computed at run time and combinations (depending on the order of derivative) of dmats tables which hold the derivatives of the expansion coefficients.""" if isinstance(data, str): return format["exception"]("evaluate_basis_derivatives: %s" % data) # Initialise return code. code = [] # Get the element cell domain, geometric and topological dimension. element_cellname = data["cellname"] gdim = data["geometric_dimension"] tdim = data["topological_dimension"] max_degree = data["max_degree"] # Compute number of derivatives that has to be computed, and # declare an array to hold the values of the derivatives on the # reference element. code += [""] if tdim == gdim: _t = "" _g = "" code += _compute_num_derivatives(tdim, "") # Reset all values. code += _reset_values(data, _g) # Handle values of argument 'n'. 
code += _handle_degree(max_degree) # If max_degree is zero, return code (to avoid declarations such as # combinations[1][0]) and because there's nothing to compute.) if max_degree == 0: return remove_unused("\n".join(code)) # Generate geo code. code += _geometry_related_code(data, tdim, gdim, element_cellname) # Generate all possible combinations of derivatives. code += _generate_combinations(tdim, "", max_degree) else: _t = "_t" _g = "_g" code += _compute_num_derivatives(tdim, _t) code += [""] code += _compute_num_derivatives(gdim, _g) # Reset all values. code += _reset_values(data, _g) # Handle values of argument 'n'. code += _handle_degree(max_degree) # If max_degree is zero, return code (to avoid declarations such as # combinations[1][0]) and because there's nothing to compute.) if max_degree == 0: return remove_unused("\n".join(code)) # Generate geo code. code += _geometry_related_code(data, tdim, gdim, element_cellname) # Generate all possible combinations of derivatives. code += _generate_combinations(tdim, _t, max_degree) code += _generate_combinations(gdim, _g, max_degree) # Generate the transformation matrix. code += _generate_transform(element_cellname, gdim, tdim, max_degree) # Create code for all basis values (dofs). dof_cases = [] for dof in data["dof_data"]: dof_cases.append(_generate_dof_code(data, dof)) code += [format["switch"](format["argument basis num"], dof_cases)] code = remove_unused("\n".join(code)) #code = "\n".join(code) return code def _handle_degree(max_degree): """Check value of argument 'n' against the maximum polynomial degree of the finite element. If user ask for n>max_degree return an appropriate number of zeros in the 'values' array. If n==0, simply direct call to evaluate_basis.""" code = [] # If n == 0, call evaluate_basis. 
code += [format["comment"]("Call evaluate_basis if order of derivatives is equal to zero.")] cond = format["argument derivative order"] + format["is equal"] + format["int"](0) val = [format["call basis"](format["argument dof num"], format["argument values"])] val += [format["return"]("")] code += [format["if"](cond, indent("\n".join(val),2))] # If n > max_degree, derivatives are always zero. Since the appropriate number of # zeros have already been inserted into the 'values' array simply return. code += [format["comment"]("If order of derivatives is greater than the maximum polynomial degree, return zeros.")] cond = format["argument derivative order"] + format["greater than"] + format["int"](max_degree) val = format["return"]("") code += [format["if"](cond, val)] return code def _geometry_related_code(data, tdim, gdim, element_cellname): code = [] # Get code snippets for Jacobian, inverse of Jacobian and mapping of # coordinates from physical element to the FIAT reference element. code += [format["compute_jacobian"](tdim, gdim)] code += [format["compute_jacobian_inverse"](tdim, gdim)] if data["needs_oriented"]: code += [format["orientation"](tdim, gdim)] code += ["", format["fiat coordinate map"](element_cellname, gdim)] return code def _compute_num_derivatives(dimension, suffix=""): """Computes the number of derivatives of order 'n' as dimension()^n. Dimension will be the element topological dimension for the number of derivatives in local coordinates, and the geometric dimension for the number of derivatives in phyisical coordinates. """ # Prefetch formats. f_int = format["int"] f_num_derivs = format["num derivatives"](suffix) # Use loop to compute power since using std::pow() result in an # ambiguous call. 
code = [format["comment"]("Compute number of derivatives.")] code.append(format["declaration"](format["uint declaration"], f_num_derivs, f_int(1))) loop_vars = [(format["free indices"][0], 0, format["argument derivative order"])] lines = [format["imul"](f_num_derivs, f_int(dimension))] code += format["generate loop"](lines, loop_vars) return code def _generate_combinations(dimension, suffix, max_degree): "Generate all possible combinations of derivatives of order 'n'." nds = dimension**max_degree # Use code from format. code = ["", format["combinations"]\ % {"combinations": format["derivative combinations"](suffix),\ "dimension-1": dimension-1,\ "num_derivatives" : format["num derivatives"](suffix),\ "n": format["argument derivative order"], "max_num_derivatives":format["int"](nds), "max_degree":format["int"](max_degree)}] return code def _generate_transform(element_cellname, gdim, tdim, max_degree): """Generate the transformation matrix, which is used to transform derivatives from reference element back to the physical element.""" max_g_d = gdim**max_degree max_t_d = tdim**max_degree # Generate code to construct the inverse of the Jacobian if (element_cellname in ["interval", "triangle", "tetrahedron"]): code = ["", format["transform snippet"][element_cellname][gdim]\ % {"transform": format["transform matrix"],\ "num_derivatives" : format["num derivatives"](""),\ "n": format["argument derivative order"],\ "combinations": format["derivative combinations"](""),\ "K":format["transform Jinv"], "max_g_deriv":max_g_d, "max_t_deriv":max_t_d}] else: error("Cannot generate transform for shape: %s" % element_cellname) return code def _reset_values(data, suffix): "Reset all components of the 'values' array as it is a pointer to an array." # Prefetch formats. f_assign = format["assign"] f_r = format["free indices"][0] code = ["", format["comment"]("Reset values. Assuming that values is always an array.")] # Get value shape and reset values. 
This should also work for TensorElement, # scalar are empty tuples, therefore (1,) in which case value_shape = 1. physical_value_size = data["physical_value_size"] # Only multiply by value shape if different from 1. if physical_value_size == 1: num_vals = format["num derivatives"](suffix) else: num_vals = format["mul"]([format["int"](physical_value_size), format["num derivatives"](suffix)]) name = format["component"](format["argument values"], f_r) loop_vars = [(f_r, 0, num_vals)] lines = [f_assign(name, format["floating point"](0))] code += format["generate loop"](lines, loop_vars) return code + [""] def _generate_dof_code(data, dof_data): "Generate code for a basis." code = [] # Compute basisvalues, from evaluatebasis.py. code += _compute_basisvalues(data, dof_data) # Tabulate coefficients. code += _tabulate_coefficients(dof_data) # Tabulate coefficients for derivatives. code += _tabulate_dmats(dof_data) # Compute the derivatives of the basisfunctions on the reference (FIAT) element, # as the dot product of the new coefficients and basisvalues. code += _compute_reference_derivatives(data, dof_data) # Transform derivatives to physical element by multiplication with the transformation matrix. code += _transform_derivatives(data, dof_data) code = remove_unused("\n".join(code)) return code def _tabulate_dmats(dof_data): "Tabulate the derivatives of the polynomial base" code = [] # Prefetch formats to speed up code generation. f_table = format["static const float declaration"] f_dmats = format["dmats"] f_component = format["component"] f_decl = format["declaration"] f_tensor = format["tabulate tensor"] f_new_line = format["new line"] # Get derivative matrices (coefficients) of basis functions, computed by FIAT at compile time. derivative_matrices = dof_data["dmats"] code += [format["comment"]("Tables of derivatives of the polynomial base (transpose).")] # Generate tables for each spatial direction. 
for i, dmat in enumerate(derivative_matrices): # Extract derivatives for current direction (take transpose, FIAT_NEW PolynomialSet.tabulate()). matrix = numpy.transpose(dmat) # Get shape and check dimension (This is probably not needed). shape = numpy.shape(matrix) ffc_assert(shape[0] == shape[1] == dof_data["num_expansion_members"], "Something is wrong with the shape of dmats.") # Declare varable name for coefficients. name = f_component(f_dmats(i), [shape[0], shape[1]]) code += [f_decl(f_table, name, f_new_line + f_tensor(matrix)), ""] return code def _reset_dmats(shape_dmats, indices): "Set values in dmats equal to the identity matrix." f_assign = format["assign"] f_float = format["floating point"] i,j = indices code = [format["comment"]("Resetting dmats values to compute next derivative.")] dmats_old = format["component"](format["dmats"](""), [i, j]) lines = [f_assign(dmats_old, f_float(0.0))] lines += [format["if"](i + format["is equal"] + j,\ f_assign(dmats_old, f_float(1.0)))] loop_vars = [(i, 0, shape_dmats[0]), (j, 0, shape_dmats[1])] code += format["generate loop"](lines, loop_vars) return code def _update_dmats(shape_dmats, indices): "Update values in dmats_old with values in dmats and set values in dmats to zero." f_assign = format["assign"] f_component = format["component"] i,j = indices code = [format["comment"]("Updating dmats_old with new values and resetting dmats.")] dmats = f_component(format["dmats"](""), [i, j]) dmats_old = f_component(format["dmats old"], [i, j]) lines = [f_assign(dmats_old, dmats), f_assign(dmats, format["floating point"](0.0))] loop_vars = [(i, 0, shape_dmats[0]), (j, 0, shape_dmats[1])] code += format["generate loop"](lines, loop_vars) return code def _compute_dmats(num_dmats, shape_dmats, available_indices, deriv_index, _t): "Compute values of dmats as a matrix product." 
f_comment = format["comment"] s, t, u = available_indices # Reset dmats_old code = _reset_dmats(shape_dmats, [t, u]) code += ["", f_comment("Looping derivative order to generate dmats.")] # Set dmats matrix equal to dmats_old lines = _update_dmats(shape_dmats, [t, u]) lines += ["", f_comment("Update dmats using an inner product.")] # Create dmats matrix by multiplication comb = format["component"](format["derivative combinations"](_t), [deriv_index, s]) for i in range(num_dmats): lines += _dmats_product(shape_dmats, comb, i, [t, u]) loop_vars = [(s, 0, format["argument derivative order"])] code += format["generate loop"](lines, loop_vars) return code def _dmats_product(shape_dmats, index, i, indices): "Create product to update dmats." f_loop = format["generate loop"] f_component = format["component"] t, u = indices tu = t + u dmats = f_component(format["dmats"](""), [t, u]) dmats_old = f_component(format["dmats old"], [tu, u]) value = format["multiply"]([f_component(format["dmats"](i), [t, tu]), dmats_old]) name = format["iadd"](dmats, value) lines = f_loop([name], [(tu, 0, shape_dmats[0])]) loop_vars = [(t, 0, shape_dmats[0]), (u, 0, shape_dmats[1])] code = [format["if"](index + format["is equal"] + str(i),\ "\n".join(f_loop(lines, loop_vars)))] return code def _compute_reference_derivatives(data, dof_data): """Compute derivatives on the reference element by recursively multiply coefficients with the relevant derivatives of the polynomial base until the requested order of derivatives has been reached. 
After this take the dot product with the basisvalues.""" # Prefetch formats to speed up code generation f_comment = format["comment"] f_num_derivs = format["num derivatives"] f_mul = format["mul"] f_int = format["int"] f_matrix_index = format["matrix index"] f_coefficients = format["coefficients"] # f_dof = format["local dof"] f_basisvalues = format["basisvalues"] f_const_double = format["const float declaration"] f_group = format["grouping"] f_transform = format["transform"] f_double = format["float declaration"] f_component = format["component"] f_tmp = format["tmp ref value"] f_dmats = format["dmats"] f_dmats_old = format["dmats old"] f_assign = format["assign"] f_decl = format["declaration"] f_iadd = format["iadd"] f_add = format["add"] f_tensor = format["tabulate tensor"] f_new_line = format["new line"] f_loop = format["generate loop"] f_derivatives = format["reference derivatives"] f_array = format["dynamic array"] f_float = format["floating point"] f_inv = format["inverse"] f_detJ = format["det(J)"] f_inner = format["inner product"] f_r, f_s, f_t, f_u = format["free indices"] tdim = data["topological_dimension"] gdim = data["geometric_dimension"] max_degree = data["max_degree"] if tdim == gdim: _t = "" _g = "" else: _t = "_t" _g = "_g" # Get number of components. num_components = dof_data["num_components"] # Get shape of derivative matrix (they should all have the same shape) and # verify that it is a square matrix. 
shape_dmats = numpy.shape(dof_data["dmats"][0]) ffc_assert(shape_dmats[0] == shape_dmats[1],\ "Something is wrong with the dmats:\n%s" % str(dof_data["dmats"])) code = [f_comment("Compute reference derivatives.")] # Declare pointer to array that holds derivatives on the FIAT element code += [f_comment("Declare array of derivatives on FIAT element.")] # The size of the array of reference derivatives is equal to the number of derivatives # times the number of components of the basis element if (num_components == 1): num_vals = f_num_derivs(_t) else: num_vals = f_mul([f_int(num_components), f_num_derivs(_t)]) nds = tdim**max_degree*num_components code += [format["declaration"](f_double, f_component(f_derivatives, f_int(nds)))] line = [f_assign(f_component(f_derivatives, f_r), f_float(0.0))] code += f_loop(line, [(f_r, 0, nds)]) code += [""] mapping = dof_data["mapping"] if "piola" in mapping: # In either of the Piola cases, the value space of the derivatives is the geometric dimension rather than the topological dimension. code += [f_comment("Declare array of reference derivatives on physical element.")] _p = "_p" num_components_p = gdim nds = tdim**max_degree*gdim code += [format["declaration"](f_double, f_component(f_derivatives+_p, f_int(nds)))] line = [f_assign(f_component(f_derivatives+_p, f_r), f_float(0.0))] code += f_loop(line, [(f_r, 0, nds)]) code += [""] else: _p = "" num_components_p = num_components # Declare matrix of dmats (which will hold the matrix product of all combinations) # and dmats_old which is needed in order to perform the matrix product. 
value = f_tensor(numpy.eye(shape_dmats[0])) code += [f_comment("Declare derivative matrix (of polynomial basis).")] name = f_component(f_dmats(""), [shape_dmats[0], shape_dmats[1]]) code += [f_decl(f_double, name, f_new_line + value), ""] code += [f_comment("Declare (auxiliary) derivative matrix (of polynomial basis).")] name = f_component(f_dmats_old, [shape_dmats[0], shape_dmats[1]]) code += [f_decl(f_double, name, f_new_line + value), ""] # Compute dmats as a recursive matrix product lines = _compute_dmats(len(dof_data["dmats"]), shape_dmats, [f_s, f_t, f_u], f_r, _t) # Compute derivatives for all components lines_c = [] for i in range(num_components): name = f_component(f_derivatives, f_matrix_index(i, f_r, f_num_derivs(_t))) coeffs = f_component(f_coefficients(i), f_s) dmats = f_component(f_dmats(""), [f_s, f_t]) basis = f_component(f_basisvalues, f_t) lines_c.append(f_iadd(name, f_mul([coeffs, dmats, basis]))) loop_vars_c = [(f_s, 0, shape_dmats[0]),(f_t, 0, shape_dmats[1])] lines += f_loop(lines_c, loop_vars_c) # Apply transformation if applicable. if mapping == "affine": pass elif mapping == "contravariant piola": lines += ["", f_comment\ ("Using contravariant Piola transform to map values back to the physical element.")] # Get temporary values before mapping. lines += [f_const_double(f_tmp(i),\ f_component(f_derivatives, f_matrix_index(i, f_r, f_num_derivs(_t)))) for i in range(num_components)] # Create names for inner product. basis_col = [f_tmp(j) for j in range(tdim)] for i in range(num_components_p): # Create Jacobian. jacobian_row = [f_transform("J", i, j, gdim, tdim, None) for j in range(tdim)] # Create inner product and multiply by inverse of Jacobian. 
inner = [f_mul([jacobian_row[j], basis_col[j]]) for j in range(tdim)] sum_ = f_group(f_add(inner)) value = f_mul([f_inv(f_detJ(None)), sum_]) name = f_component(f_derivatives+_p, f_matrix_index(i, f_r, f_num_derivs(_t))) lines += [f_assign(name, value)] elif mapping == "covariant piola": lines += ["", f_comment\ ("Using covariant Piola transform to map values back to the physical element")] # Get temporary values before mapping. lines += [f_const_double(f_tmp(i),\ f_component(f_derivatives, f_matrix_index(i, f_r, f_num_derivs(_t)))) for i in range(num_components)] # Create names for inner product. basis_col = [f_tmp(j) for j in range(tdim)] for i in range(num_components_p): # Create inverse of Jacobian. inv_jacobian_column = [f_transform("JINV", j, i, tdim, gdim, None) for j in range(tdim)] # Create inner product of basis and inverse of Jacobian. inner = [f_mul([inv_jacobian_column[j], basis_col[j]]) for j in range(tdim)] value = f_group(f_add(inner)) name = f_component(f_derivatives+_p, f_matrix_index(i, f_r, f_num_derivs(_t))) lines += [f_assign(name, value)] elif mapping == "pullback as metric": lines += ["", f_comment("Using metric pullback to map values back to the physical element")] lines += [f_const_double(f_tmp(i), f_component(f_derivatives, f_matrix_index(i, f_r, f_num_derivs(_t)))) for i in range(num_components)] basis_col = [f_tmp(j) for j in range(num_components)] for p in range(num_components): # unflatten the indices i = p // tdim l = p % tdim # g_il = K_ji G_jk K_kl value = f_group(f_inner( [f_inner([f_transform("JINV", j, i, tdim, gdim, None) for j in range(tdim)], [basis_col[j * tdim + k] for j in range(tdim)]) for k in range(tdim)], [f_transform("JINV", k, l, tdim, gdim, None) for k in range(tdim)])) name = f_component(f_derivatives+_p, f_matrix_index(p, f_r, f_num_derivs(_t))) lines += [f_assign(name, value)] else: error("Unknown mapping: %s" % mapping) # Generate loop over number of derivatives. 
# Loop all derivatives and compute value of the derivative as: # deriv_on_ref[r] = coeff[dof][s]*dmat[s][t]*basis[t] code += [f_comment("Loop possible derivatives.")] loop_vars = [(f_r, 0, f_num_derivs(_t))] code += f_loop(lines, loop_vars) return code + [""] def _transform_derivatives(data, dof_data): """Transform derivatives back to the physical element by applying the transformation matrix.""" # Prefetch formats to speed up code generation. f_loop = format["generate loop"] f_num_derivs = format["num derivatives"] f_derivatives = format["reference derivatives"] f_values = format["argument values"] f_mul = format["mul"] f_iadd = format["iadd"] f_component = format["component"] f_transform = format["transform matrix"] f_r, f_s = format["free indices"][:2] f_index = format["matrix index"] if data["topological_dimension"]==data["geometric_dimension"]: _t = "" _g = "" else: _t = "_t" _g = "_g" # Get number of components and offset. num_components = dof_data["num_components"] offset = dof_data["offset"] mapping = dof_data["mapping"] if "piola" in mapping: # In either of the Piola cases, the value space of the derivatives is the geometric dimension rather than the topological dimension. _p = "_p" num_components_p = data["geometric_dimension"] else: _p = "" num_components_p = num_components code = [format["comment"]("Transform derivatives back to physical element")] lines = [] for i in range(num_components_p): access_name = f_index(offset + i, f_r, f_num_derivs(_g)) name = f_component(f_values, access_name) access_val = f_index(i, f_s, f_num_derivs(_t)) value = f_mul([f_component(f_transform, [f_r, f_s]), f_component(f_derivatives+_p, access_val)]) lines += [f_iadd(name, value)] loop_vars = [(f_r, 0, f_num_derivs(_g)), (f_s, 0, f_num_derivs(_t))] code += f_loop(lines, loop_vars) return code ffc-1.6.0/ffc/evaluatedof.py000066400000000000000000000313151255571034100156710ustar00rootroot00000000000000"""Code generation for evaluate_dof. 
This module generates the functions evaluate_dof and evaluate_dofs. These evaluate the degree of freedom (dof) number i and all degrees of freedom for an element respectively. Each dof L is assumed to act on a field f in the following manner: L(f) = w_{j, k} f_k(x_j) where w is a set of weights, j is an index set corresponding to the number of points involved in the evaluation of the functional, and k is a multi-index set with rank corresponding to the value rank of the function f. For common degrees of freedom such as point evaluations and directional component evaluations, there is just one point. However, for various integral moments, the integrals are evaluated using quadrature. The number of points therefore correspond to the quadrature points. The points x_j, weights w_{j, k} and components k are extracted from FIAT (functional.pt_dict) in the intermediate representation stage. """ # Copyright (C) 2009 Marie E. Rognes # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Kristian B. 
Oelgaard 2010-2011 # Modified by Anders Logg 2013 # Modified by Lizao Li 2015 # # First added: 2009-xx-yy # Last changed: 2015-03-20 from ffc.cpp import format, remove_unused from ffc.utils import pick_first from ufl.permutation import build_component_numbering __all__ = ["evaluate_dof_and_dofs", "affine_weights"] # Prefetch formats: comment = format["comment"] declare = format["declaration"] assign = format["assign"] component = format["component"] iadd = format["iadd"] inner = format["inner product"] add = format["addition"] multiply = format["multiply"] J = format["J"] Jinv = format["inv(J)"] detJ = format["det(J)"](None) ret = format["return"] f_i = format["argument dof num"] f_values = format["argument values"] f_double = format["float declaration"] f_vals = format["dof vals"] f_result = format["dof result"] f_y = format["dof physical coordinates"] f_x = format["vertex_coordinates"] f_int = format["int declaration"] f_X = format["dof X"] f_D = format["dof D"] f_W = format["dof W"] f_copy = format["dof copy"] f_r, f_s = format["free indices"][:2] f_loop = format["generate loop"] map_onto_physical = format["map onto physical"] def evaluate_dof_and_dofs(ir): "Generate code for evaluate_dof and evaluate_dof." 
# Generate common code (reqs, cases) = _generate_common_code(ir) # Combine each case with returns for evaluate_dof and switch dof_cases = ["%s\n%s" % (c, ret(r)) for (c, r) in cases] dof_code = reqs + format["switch"](f_i, dof_cases, ret(format["float"](0.0))) # Combine each case with assignments for evaluate_dofs dofs_cases = "\n".join("%s\n%s" % (c, format["assign"](component(f_values, i), r)) for (i, (c, r)) in enumerate(cases)) dofs_code = reqs + dofs_cases return (dof_code, dofs_code) def _generate_common_code(ir): # Define necessary geometry information based on the ir reqs = _required_declarations(ir) # Extract variables mappings = ir["mappings"] offsets = ir["physical_offsets"] gdim = ir["geometric_dimension"] tdim = ir["topological_dimension"] # Generate bodies for each degree of freedom cases = [_generate_body(i, dof, mappings[i], gdim, tdim, offsets[i]) for (i, dof) in enumerate(ir["dofs"])] return (reqs, cases) def _required_declarations(ir): """Generate code for declaring required variables and geometry information. """ # Enriched element, no dofs defined if not any(ir["dofs"]): return "" code = [] gdim = ir["geometric_dimension"] tdim = ir["topological_dimension"] # Declare variable for storing the result and physical coordinates code.append(comment("Declare variables for result of evaluation")) code.append(declare(f_double, component(f_vals, ir["physical_value_size"]))) code.append("") code.append(comment("Declare variable for physical coordinates")) code.append(declare(f_double, component(f_y, gdim))) code.append("") # Check whether Jacobians are necessary. 
needs_inverse_jacobian = any(["contravariant piola" in m for m in ir["mappings"]]) needs_jacobian = any(["covariant piola" in m for m in ir["mappings"]]) or any(["pullback as metric" in m for m in ir["mappings"]]) # Check if Jacobians are needed if not (needs_jacobian or needs_inverse_jacobian): return "\n".join(code) # Otherwise declare intermediate result variable code.append(declare(f_double, f_result)) # Add sufficient Jacobian information. Note: same criterion for # needing inverse Jacobian as for needing oriented Jacobian code.append(format["compute_jacobian"](tdim, gdim)) if needs_inverse_jacobian: code.append("") code.append(format["compute_jacobian_inverse"](tdim, gdim)) code.append("") code.append(format["orientation"](tdim, gdim)) return "\n".join(code) def _generate_body(i, dof, mapping, gdim, tdim, offset=0, result=f_result): "Generate code for a single dof." # EnrichedElement is handled by having [None, ..., None] dual basis if not dof: return (format["exception"]("evaluate_dof(s) for enriched element " "not implemented."), 0.0) points = list(dof.keys()) # Generate different code if multiple points. (Otherwise ffc # compile time blows up.) 
if len(points) > 1: code = _generate_multiple_points_body(i, dof, mapping, gdim, tdim, offset, result) return (code, result) # Get weights for mapping reference point to physical x = points[0] w = affine_weights(tdim)(x) # Map point onto physical element: y = F_K(x) code = [] for j in range(gdim): y = inner(w, [component(f_x(), (k*gdim + j,)) for k in range(tdim + 1)]) code.append(assign(component(f_y, j), y)) # Evaluate function at physical point code.append(format["evaluate function"]) # Map function values to the reference element F = _change_variables(mapping, gdim, tdim, offset) # Simple affine functions deserve special case: if len(F) == 1: return ("\n".join(code), multiply([dof[x][0][0], F[0]])) # Flatten multiindices (index_map, _) = build_component_numbering([tdim] * len(dof[x][0][1]), ()) # Take inner product between components and weights value = add([multiply([w, F[index_map[k]]]) for (w, k) in dof[x]]) # Assign value to result variable code.append(assign(result, value)) return ("\n".join(code), result) def _generate_multiple_points_body(i, dof, mapping, gdim, tdim, offset=0, result=f_result): "Generate c++ for-loop for multiple points (integral bodies)" code = [assign(f_result, 0.0)] points = list(dof.keys()) n = len(points) # Get number of tokens per point tokens = [dof[x] for x in points] len_tokens = pick_first([len(t) for t in tokens]) # Declare points points = format["list"]([format["list"](x) for x in points]) code += [declare(f_double, component(f_X(i), [n, tdim]), points)] # Declare components components = [[c[0] for (w, c) in token] for token in tokens] components = format["list"]([format["list"](c) for c in components]) code += [declare(f_int, component(f_D(i), [n, len_tokens]), components)] # Declare weights weights = [[w for (w, c) in token] for token in tokens] weights = format["list"]([format["list"](w) for w in weights]) code += [declare(f_double, component(f_W(i), [n, len_tokens]), weights)] # Declare copy variable: code += 
[declare(f_double, component(f_copy(i), tdim))] # Add loop over points code += [comment("Loop over points")] # Map the points from the reference onto the physical element #assert(gdim == tdim), \ # "Integral moments not supported for manifolds (yet). Please fix" lines_r = [map_onto_physical[tdim][gdim] % {"i": i, "j": f_r}] # Evaluate function at physical point lines_r.append(comment("Evaluate function at physical point")) lines_r.append(format["evaluate function"]) # Map function values to the reference element lines_r.append(comment("Map function to reference element")) F = _change_variables(mapping, gdim, tdim, offset) lines_r += [assign(component(f_copy(i), k), F_k) for (k, F_k) in enumerate(F)] # Add loop over directional components lines_r.append(comment("Loop over directions")) value = multiply([component(f_copy(i), component(f_D(i), (f_r, f_s))), component(f_W(i), (f_r, f_s))]) # Add value from this point to total result lines_s = [iadd(f_result, value)] # Generate loop over s and add to r. loop_vars_s = [(f_s, 0, len_tokens)] lines_r += f_loop(lines_s, loop_vars_s) # Generate loop over r and add to code. loop_vars_r = [(f_r, 0, n)] code += f_loop(lines_r, loop_vars_r) code = "\n".join(code) return code def _change_variables(mapping, gdim, tdim, offset): """Generate code for mapping function values according to 'mapping' and offset. The basics of how to map a field from a physical to the reference domain. (For the inverse approach -- see interpolatevertexvalues) Let g be a field defined on a physical domain T with physical coordinates x. Let T_0 be a reference domain with coordinates X. Assume that F: T_0 -> T such that x = F(X) Let J be the Jacobian of F, i.e J = dx/dX and let K denote the inverse of the Jacobian K = J^{-1}. 
Then we (currently) have the following four types of mappings: 'affine' mapping for g: G(X) = g(x) For vector fields g: 'contravariant piola' mapping for g: G(X) = det(J) K g(x) i.e G_i(X) = det(J) K_ij g_j(x) 'covariant piola' mapping for g: G(X) = J^T g(x) i.e G_i(X) = J^T_ij g(x) = J_ji g_j(x) 'pullback as metric' mapping for g: G(X) = J^T g(x) J i.e. G_il(X) = J_ji g_jk(x) J_kl """ # meg: Various mappings must be handled both here and in # interpolate_vertex_values. Could this be abstracted out? if mapping == "affine": return [component(f_vals, offset)] elif mapping == "contravariant piola": # Map each component from physical to reference using inverse # contravariant piola values = [] for i in range(tdim): inv_jacobian_row = [Jinv(i, j, tdim, gdim) for j in range(gdim)] components = [component(f_vals, j + offset) for j in range(gdim)] values += [multiply([detJ, inner(inv_jacobian_row, components)])] return values elif mapping == "covariant piola": # Map each component from physical to reference using inverse # covariant piola values = [] for i in range(tdim): jacobian_column = [J(j, i, gdim, tdim) for j in range(gdim)] components = [component(f_vals, j + offset) for j in range(gdim)] values += [inner(jacobian_column, components)] return values elif mapping == "pullback as metric": # physical to reference pullback as a metric values = [] for i in range(tdim): for l in range(tdim): values += [inner( [inner([J(j, i, gdim, tdim) for j in range(gdim)], [component(f_vals, j * tdim + k + offset) for j in range(gdim)]) for k in range(gdim)], [J(k, l, gdim, tdim) for k in range(gdim)])] return values else: raise Exception("The mapping (%s) is not allowed" % mapping) return code def affine_weights(dim): "Compute coefficents for mapping from reference to physical element" if dim == 1: return lambda x: (1.0 - x[0], x[0]) elif dim == 2: return lambda x: (1.0 - x[0] - x[1], x[0], x[1]) elif dim == 3: return lambda x: (1.0 - x[0] - x[1] - x[2], x[0], x[1], x[2]) 
ffc-1.6.0/ffc/extras.py000066400000000000000000000070111255571034100146740ustar00rootroot00000000000000"This modules provides additional functionality for users of FFC." # Copyright (C) 2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2010-09-03 # Last changed: 2010-09-06 __all__ = ["compute_tensor_representation"] # Python modules from time import time # FFC modules from ffc.compiler import _print_timing from ffc.parameters import default_parameters from ffc.analysis import analyze_forms from ffc.representation import compute_ir from ffc.optimization import optimize_ir from ffc.codegeneration import generate_code def compute_tensor_representation(form): """Compute tensor representation for given form. This function may be useful for those (Hi Matt!) that want to access the FFC tensor representation from outside FFC.""" # Set parameters parameters = default_parameters() parameters["representation"] = "tensor" #parameters["optimize"] = "optimize" # The below steps basically duplicate the compiler process but # skip the code formatting step. Instead, we extract the relevant # portions for tabulate_tensor. 
# Stage 1: analysis cpu_time = time() analysis = analyze_forms([form], {}, parameters) _print_timing(1, time() - cpu_time) # Stage 2: intermediate representation cpu_time = time() ir = compute_ir(analysis, parameters) _print_timing(2, time() - cpu_time) # Stage 3: optimization cpu_time = time() oir = optimize_ir(ir, parameters) _print_timing(3, time() - cpu_time) # Stage 4: code generation cpu_time = time() code = generate_code(oir, "foo", parameters) _print_timing(4, time() - cpu_time) # Extract representations ir_elements, ir_dofmaps, ir_integrals, ir_forms = ir # Extract entries in reference tensor reference_tensors = [] for i in ir_integrals: if i["integral_type"] == "cell": t = [A0.A0 for (A0, GK, dummy) in i["AK"]] if len(t) == 1: t = t[0] elif i["integral_type"] == "exterior_facet": t = [A0.A0 for j in i["AK"] for (A0, GK, dummy) in j] if len(t) == 1: t = t[0] elif i["integral_type"] == "interior_facet": t = [A0.A0 for j in i["AK"] for k in j for (A0, GK, dummy) in k] if len(t) == 1: t = t[0] else: raise RuntimeError("Unhandled domain type: %s" % str(i["integral_type"])) reference_tensors.append(t) # Extract code code_elements, code_dofmaps, code_integrals, code_forms = code # Extract code for computing the geometry tensor geometry_tensor_codes = [c["tabulate_tensor"].split("// Compute element tensor")[0] for c in code_integrals] # Simplify return values when there is just one term if len(reference_tensors) == 1: reference_tensors = reference_tensors[0] if len(geometry_tensor_codes) == 1: geometry_tensor_codes = geometry_tensor_codes[0] return reference_tensors, geometry_tensor_codes ffc-1.6.0/ffc/fiatinterface.py000066400000000000000000000261441255571034100162020ustar00rootroot00000000000000# Copyright (C) 2009-2013 Kristian B. Oelgaard and Anders Logg # # This file is part of FFC. 
# # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Garth N. Wells, 2009. # Modified by Marie Rognes, 2009-2013. # Modified by Martin Alnaes, 2013 # Modified by Lizao Li, 2015 # Python modules from numpy import array import six # UFL and FIAT modules import ufl from ufl.utils.sorting import sorted_by_key import FIAT from FIAT.trace import DiscontinuousLagrangeTrace # FFC modules from ffc.log import debug, error, ffc_assert from ffc.quadratureelement import QuadratureElement as FFCQuadratureElement from ffc.mixedelement import MixedElement from ffc.restrictedelement import RestrictedElement from ffc.enrichedelement import EnrichedElement, SpaceOfReals # Dictionary mapping from cellname to dimension from ufl.cell import cellname2dim # Element families supported by FFC supported_families = ("Brezzi-Douglas-Marini", "Brezzi-Douglas-Fortin-Marini", "Crouzeix-Raviart", "Discontinuous Lagrange", "Discontinuous Raviart-Thomas", "Discontinuous Lagrange Trace", "Lagrange", "Lobatto", "Nedelec 1st kind H(curl)", "Nedelec 2nd kind H(curl)", "Radau", "Raviart-Thomas", "Real", "Bubble", "Quadrature", "Regge") # Cache for computed elements _cache = {} def reference_cell(dim): if isinstance(dim, int): return FIAT.ufc_simplex(dim) else: return FIAT.ufc_simplex(cellname2dim[dim]) def reference_cell_vertices(dim): "Return dict of coordinates of reference cell vertices for this 'dim'." 
cell = reference_cell(dim) return cell.get_vertices() def create_element(ufl_element): # Create element signature for caching (just use UFL element) element_signature = ufl_element # Check cache if element_signature in _cache: debug("Reusing element from cache") return _cache[element_signature] # Create regular FIAT finite element if isinstance(ufl_element, ufl.FiniteElement): element = _create_fiat_element(ufl_element) # Create mixed element (implemented by FFC) elif isinstance(ufl_element, ufl.MixedElement): elements = _extract_elements(ufl_element) element = MixedElement(elements) # Create element union (implemented by FFC) elif isinstance(ufl_element, ufl.EnrichedElement): elements = [create_element(e) for e in ufl_element._elements] element = EnrichedElement(elements) # Create restricted element(implemented by FFC) elif isinstance(ufl_element, ufl.RestrictedElement): element = _create_restricted_element(ufl_element) else: error("Cannot handle this element type: %s" % str(ufl_element)) # Store in cache _cache[element_signature] = element return element def _create_fiat_element(ufl_element): "Create FIAT element corresponding to given finite element." # Get element data family = ufl_element.family() domain, = ufl_element.domains() # Assuming single domain cellname = domain.cell().cellname() # Assuming single cell in domain degree = ufl_element.degree() # Check that FFC supports this element ffc_assert(family in supported_families, "This element family (%s) is not supported by FFC." 
% family) # Handle the space of the constant if family == "Real": dg0_element = ufl.FiniteElement("DG", domain, 0) constant = _create_fiat_element(dg0_element) element = SpaceOfReals(constant) # Handle the specialized time elements elif family == "Lobatto" : from ffc.timeelements import LobattoElement as FFCLobattoElement element = FFCLobattoElement(ufl_element.degree()) elif family == "Radau" : from ffc.timeelements import RadauElement as FFCRadauElement element = FFCRadauElement(ufl_element.degree()) # FIXME: AL: Should this really be here? # Handle QuadratureElement elif family == "Quadrature": element = FFCQuadratureElement(ufl_element) else: # Create FIAT cell fiat_cell = reference_cell(cellname) # Handle Bubble element as RestrictedElement of P_{k} to interior if family == "Bubble": V = FIAT.supported_elements["Lagrange"](fiat_cell, degree) dim = domain.topological_dimension() return RestrictedElement(V, _indices(V, "interior", dim), None) # Check if finite element family is supported by FIAT if not family in FIAT.supported_elements: error("Sorry, finite element of type \"%s\" are not supported by FIAT.", family) # Create FIAT finite element ElementClass = FIAT.supported_elements[family] if degree is None: element = ElementClass(fiat_cell) else: element = ElementClass(fiat_cell, degree) # Consistency check between UFL and FIAT elements. This will not hold for elements # where the reference value shape is different from the global value shape, i.e. # RT elements on a triangle in 3D. #ffc_assert(element.value_shape() == ufl_element.value_shape(), # "Something went wrong in the construction of FIAT element from UFL element." + \ # "Shapes are %s and %s." % (element.value_shape(), ufl_element.value_shape())) return element def create_quadrature(shape, num_points): """ Generate quadrature rule (points, weights) for given shape with num_points points in each direction. 
""" if isinstance(shape, int) and shape == 0: return ([()], array([1.0,])) if shape in cellname2dim and cellname2dim[shape] == 0: return ([()], array([1.0,])) quad_rule = FIAT.make_quadrature(reference_cell(shape), num_points) return quad_rule.get_points(), quad_rule.get_weights() def map_facet_points(points, facet): """ Map points from the e (UFC) reference simplex of dimension d - 1 to a given facet on the (UFC) reference simplex of dimension d. This may be used to transform points tabulated for example on the 2D reference triangle to points on a given facet of the reference tetrahedron. """ # Extract the geometric dimension of the points we want to map dim = len(points[0]) + 1 # Special case, don't need to map coordinates on vertices if dim == 1: return [[(0.0,), (1.0,)][facet]] # Get the FIAT reference cell for this dimension fiat_cell = reference_cell(dim) # Extract vertex coordinates from cell and map of facet index to # indicent vertex indices vertex_coordinates = fiat_cell.get_vertices() facet_vertices = fiat_cell.get_topology()[dim-1] #vertex_coordinates = \ # {1: ((0.,), (1.,)), # 2: ((0., 0.), (1., 0.), (0., 1.)), # 3: ((0., 0., 0.), (1., 0., 0.),(0., 1., 0.), (0., 0., 1))} # Facet vertices #facet_vertices = \ # {2: ((1, 2), (0, 2), (0, 1)), # 3: ((1, 2, 3), (0, 2, 3), (0, 1, 3), (0, 1, 2))} # Compute coordinates and map the points coordinates = [vertex_coordinates[v] for v in facet_vertices[facet]] new_points = [] for point in points: w = (1.0 - sum(point),) + tuple(point) x = tuple(sum([w[i]*array(coordinates[i]) for i in range(len(w))])) new_points += [x] return new_points def _extract_elements(ufl_element, domain=None): "Recursively extract un-nested list of (component) elements." elements = [] if isinstance(ufl_element, ufl.MixedElement): for sub_element in ufl_element.sub_elements(): elements += _extract_elements(sub_element, domain) return elements # Handle restricted elements since they might be mixed elements too. 
if isinstance(ufl_element, ufl.RestrictedElement): base_element = ufl_element.element() restriction = ufl_element.cell_restriction() return _extract_elements(base_element, restriction) if domain: ufl_element = ufl.RestrictedElement(ufl_element, domain) elements += [create_element(ufl_element)] return elements def _create_restricted_element(ufl_element): "Create an FFC representation for an UFL RestrictedElement." if not isinstance(ufl_element, ufl.RestrictedElement): error("create_restricted_element expects an ufl.RestrictedElement") base_element = ufl_element.element() restriction_domain = ufl_element.cell_restriction() # If simple element -> create RestrictedElement from fiat_element if isinstance(base_element, ufl.FiniteElement): element = _create_fiat_element(base_element) return RestrictedElement(element, _indices(element, restriction_domain), restriction_domain) # If restricted mixed element -> convert to mixed restricted element if isinstance(base_element, ufl.MixedElement): elements = _extract_elements(base_element, restriction_domain) return MixedElement(elements) error("Cannot create restricted element from %s" % str(ufl_element)) def _indices(element, restriction_domain, dim=0): "Extract basis functions indices that correspond to restriction_domain." # FIXME: The restriction_domain argument in FFC/UFL needs to be re-thought and # cleaned-up. # If restriction_domain is "interior", pick basis functions associated with # cell. if restriction_domain == "interior" and dim: return element.entity_dofs()[dim][0] # If restriction_domain is a ufl.Cell, pick basis functions associated with # the topological degree of the restriction_domain and of all lower # dimensions. 
if isinstance(restriction_domain, ufl.Cell): dim = restriction_domain.topological_dimension() entity_dofs = element.entity_dofs() indices = [] for dim in range(restriction_domain.topological_dimension() + 1): entities = entity_dofs[dim] for (entity, index) in sorted_by_key(entities): indices += index return indices # Just extract all indices to make handling in RestrictedElement # uniform. #elif isinstance(restriction_domain, ufl.Measure): # indices = [] # entity_dofs = element.entity_dofs() # for dim, entities in entity_dofs.items(): # for entity, index in entities.items(): # indices += index # return indices else: error("Restriction to domain: %s, is not supported." % repr(restriction_domain)) ffc-1.6.0/ffc/formatting.py000066400000000000000000000121731255571034100155450ustar00rootroot00000000000000""" Compiler stage 5: Code formatting --------------------------------- This module implements the formatting of UFC code from a given dictionary of generated C++ code for the body of each UFC function. It relies on templates for UFC code available as part of the module ufc_utils. """ # Copyright (C) 2009-2015 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
# Python modules import os # FFC modules from ffc.log import info, error, begin, end, dstr from ffc import __version__ as FFC_VERSION from ffc.backends.ufc import __version__ as UFC_VERSION from ffc.cpp import format from ffc.backends.ufc import templates from ffc.parameters import compilation_relevant_parameters def format_code(code, wrapper_code, prefix, parameters): "Format given code in UFC format. Returns two strings with header and source file contents." begin("Compiler stage 5: Formatting code") # Extract code code_elements, code_dofmaps, code_integrals, code_forms = code # Header and implementation code code_h = "" code_c = "" # Generate code for comment on top of file code_h += _generate_comment(parameters) + "\n" code_c += _generate_comment(parameters) + "\n" # Generate code for header code_h += format["header_h"] % {"prefix_upper": prefix.upper()} code_h += _generate_additional_includes(code_integrals) + "\n" code_c += format["header_c"] % {"prefix": prefix} # Generate code for elements for code_element in code_elements: code_h += _format_h("finite_element", code_element, parameters) code_c += _format_c("finite_element", code_element, parameters) # Generate code for dofmaps for code_dofmap in code_dofmaps: code_h += _format_h("dofmap", code_dofmap, parameters) code_c += _format_c("dofmap", code_dofmap, parameters) # Generate code for integrals for code_integral in code_integrals: code_h += _format_h(code_integral["class_type"], code_integral, parameters) code_c += _format_c(code_integral["class_type"], code_integral, parameters) # Generate code for form for code_form in code_forms: code_h += _format_h("form", code_form, parameters) code_c += _format_c("form", code_form, parameters) # Add wrappers if wrapper_code: code_h += wrapper_code # Generate code for footer code_h += format["footer"] end() return code_h, code_c def write_code(code_h, code_c, prefix, parameters): # Write file(s) if parameters["split"]: _write_file(code_h, prefix, ".h", parameters) 
_write_file(code_c, prefix, ".cpp", parameters) else: _write_file(code_h, prefix, ".h", parameters) def _format_h(class_type, code, parameters): "Format header code for given class type." if parameters["split"]: return templates[class_type + "_header"] % code + "\n" else: return templates[class_type + "_combined"] % code + "\n" def _format_c(class_type, code, parameters): "Format implementation code for given class type." if parameters["split"]: return templates[class_type + "_implementation"] % code + "\n" else: return "" def _write_file(output, prefix, postfix, parameters): "Write generated code to file." filename = os.path.join(parameters["output_dir"], prefix + postfix) with open(filename, "w") as hfile: hfile.write(output) info("Output written to " + filename + ".") def _generate_comment(parameters): "Generate code for comment on top of file." # Drop irrelevant parameters parameters = compilation_relevant_parameters(parameters) # Generate top level comment args = {"ffc_version": FFC_VERSION, "ufc_version": UFC_VERSION} if parameters["format"] == "ufc": comment = format["ufc comment"] % args elif parameters["format"] == "dolfin": comment = format["dolfin comment"] % args else: error("Unable to format code, unknown format \"%s\".", parameters["format"]) # Add parameter information comment += format["comment"]("") + "\n" comment += format["comment"]("This code was generated with the following parameters:") + "\n" comment += format["comment"]("") comment += "\n".join([""] + [format["comment"](" " + l) for l in dstr(parameters).split("\n")][:-1]) comment += "\n" return comment def _generate_additional_includes(codes): s = set() for code in codes: if "additional_includes_set" in code: s.update(code["additional_includes_set"]) if s: return "\n".join(list(s)) + "\n" return "" ffc-1.6.0/ffc/interpolatevertexvalues.py000066400000000000000000000151151255571034100203760ustar00rootroot00000000000000"Code generation for interpolate_vertex_values." 
# Copyright (C) 2009 Marie E. Rognes # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Kristian B. Oelgaard 2010 # Modified by Lizao Li 2015 # # Last changed: 2015-03-25 from ffc.cpp import format, remove_unused # Extract code manipulation formats inner = format["inner product"] component = format["component"] assign = format["assign"] multiply = format["multiply"] # Extract formats for the Jacobians J = format["J"] Jinv = format["inv(J)"] invdetJ = format["inverse"](format["det(J)"](None)) f_dof_values = format["argument dof values"] f_vertex_values = format["argument vertex values"] def interpolate_vertex_values(ir): "Generate code for interpolate_vertex_values." # Handle unsupported elements. 
if isinstance(ir, str): return format["exception"]("interpolate_vertex_values: %s" % ir) # Add code for Jacobian if necessary code = [] gdim = ir["geometric_dimension"] tdim = ir["topological_dimension"] if ir["needs_jacobian"]: # Generate code for basic geometric quantities code.append(format["compute_jacobian"](tdim, gdim)) code.append("") code.append(format["compute_jacobian_inverse"](tdim, gdim)) if ir["needs_oriented"]: code.append("") code.append(format["orientation"](tdim, gdim)) # Compute total value dimension for (mixed) element total_dim = ir["physical_value_size"] # Generate code for each element value_offset = 0 space_offset = 0 for data in ir["element_data"]: # Add vertex interpolation for this element code.append(format["comment"]("Evaluate function and change variables")) code.append(_interpolate_vertex_values_element(data, gdim, tdim, total_dim, value_offset, space_offset)) # Update offsets for value- and space dimension value_offset += data["physical_value_size"] space_offset += data["space_dim"] # Remove unused variables. (Not tracking set of used variables in # order to keep this code clean. Since generated code is of # limited size, this should be ok.) clean_code = remove_unused("\n".join(code)) return clean_code def _interpolate_vertex_values_element(data, gdim, tdim, total_value_size, value_offset=0, space_offset=0): # Extract vertex values for all basis functions vertex_values = data["basis_values"] value_size = data["physical_value_size"] space_dim = data["space_dim"] mapping = data["mapping"] # Map basis values according to element mapping. 
Assumes single # mapping for each (non-mixed) element change_of_variables = _change_variables(data["mapping"], gdim, tdim, space_dim) # Create code for each value dimension: code = [] for k in range(value_size): # Create code for each vertex x_j for (j, values_at_vertex) in enumerate(vertex_values): if value_size == 1: values_at_vertex = [values_at_vertex] # Map basis functions using appropriate mapping components = change_of_variables(values_at_vertex, k) # Contract coefficients and basis functions dof_values = [component(f_dof_values, i + space_offset) for i in range(space_dim)] value = inner(dof_values, components) # Assign value to correct vertex index = j*total_value_size + (k + value_offset) code.append(assign(component(f_vertex_values, index), value)) return "\n".join(code) def _change_variables(mapping, gdim, tdim, space_dim): """ How to map a field G from the reference domain to a physical domain: For the converse approach -- see evaluatedof.py Let g be a field defined on the reference domain T_0 (of dimension tdim) with reference coordinates X. Let T be a a physical domain (of dimension gdim) with coordinates x. Assume that F: T_0 -> T such that x = F(X) Let J be the Jacobian of F, i.e J = dx/dX and let K denote the (pseudo)-inverse of the Jacobian K = J^{-1}. Note that J is gdim x tdim, and conversely K is tdim x gdim. 
Then we (currently) have the following four types of mappings: 'affine' mapping for G: g(x) = G(X) For vector fields G: 'contravariant piola' mapping for f: g(x) = 1.0/det(J) J G(X) i.e g_i(x) = 1.0/det(J) J_ij G_j(X) 'covariant piola' mapping for f: g(x) = K^T G(X) i.e g_i(x) = K^T_ij G_j(X) = K_ji G_j(X) 'pullback as metric' mapping for f: g_il(x) = K_{ji} G_{jk} K_{kl} """ if mapping is "affine": change_of_variables = lambda G, i: G[i] elif mapping == "contravariant piola": change_of_variables = lambda G, i: [multiply([invdetJ, inner([J(i, j, gdim, tdim) for j in range(tdim)], [G[j][index] for j in range(tdim)])]) for index in range(space_dim)] elif mapping == "covariant piola": change_of_variables = lambda G, i: [inner([Jinv(j, i, tdim, gdim) for j in range(tdim)], [G[j][index] for j in range(tdim)]) for index in range(space_dim)] elif mapping == "pullback as metric": change_of_variables = lambda G, i: [ inner([inner([Jinv(j, i // tdim, tdim, gdim) for j in range(tdim)], [G[j][k][index] for j in range(tdim)]) for k in range(tdim)], [Jinv(k, i % tdim, tdim, gdim) for k in range(tdim)]) for index in range(space_dim)] else: raise Exception("No such mapping: %s accepted" % mapping) return change_of_variables ffc-1.6.0/ffc/jitcompiler.py000066400000000000000000000165071255571034100157210ustar00rootroot00000000000000"""This module provides a just-in-time (JIT) form compiler. It uses Instant to wrap the generated code into a Python module.""" # Copyright (C) 2007-2015 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Johan Hake, 2008-2009 # Modified by Ilmar Wilbers, 2008 # Modified by Kristian B. Oelgaard, 2009 # Modified by Joachim Haga, 2011. # Modified by Martin Alnaes, 2013-2015 # Python modules import os, sys import instant import ufc # UFL modules from ufl.common import istr, tstr from ufl import TestFunction, ds, dx from ufl.classes import Form, FiniteElementBase from ufl.algorithms import extract_elements, extract_sub_elements, compute_form_data # FFC modules from ffc.log import log from ffc.log import info from ffc.log import warning from ffc.log import debug from ffc.log import error from ffc.log import set_level from ffc.log import set_prefix from ffc.log import INFO from ffc.parameters import default_parameters from ffc.mixedelement import MixedElement from ffc.compiler import compile_form from ffc.jitobject import JITObject from ffc.quadratureelement import default_quadrature_degree from ffc.backends.ufc import build_ufc_module # Special Options for JIT-compilation FFC_PARAMETERS_JIT = default_parameters() FFC_PARAMETERS_JIT["no-evaluate_basis_derivatives"] = True # Set debug level for Instant instant.set_log_level("warning") def jit(ufl_object, parameters=None): """Just-in-time compile the given form or element Parameters: ufl_object : The UFL object to be compiled parameters : A set of parameters """ # Check if we get an element or a form if isinstance(ufl_object, FiniteElementBase): return jit_element(ufl_object, parameters) else: return jit_form(ufl_object, parameters) def check_swig_version(compiled_module): # Check swig version of compiled module if compiled_module and compiled_module.swigversion != ufc.__swigversion__: error("Incompatible swig versions detected. 
UFC swig "\ "version is not the same as extension module swig "\ "version: '%s' != '%s' " % \ (ufc.__swigversion__, compiled_module.swigversion)) def jit_form(form, parameters=None): "Just-in-time compile the given form." # Check that we get a Form if not isinstance(form, Form): error("Unable to convert object to a UFL form: %s" % repr(form)) # Check parameters parameters = _check_parameters(form, parameters) # Set log level set_level(parameters["log_level"]) set_prefix(parameters["log_prefix"]) # Wrap input jit_object = JITObject(form, parameters) # Set prefix for generated code module_name = "ffc_form_" + jit_object.signature() # Use Instant cache if possible cache_dir = parameters["cache_dir"] or None module = instant.import_module(module_name, cache_dir=cache_dir) if module: debug("Reusing form from cache.") else: # Take lock to serialise file removal. # Need to add "_0" to lock as instant.import_module acquire # lock with name: module_name with instant.file_lock(instant.get_default_cache_dir(), module_name + "_0") as lock: # Retry Instant cache. The module may have been created while we waited # for the lock, even if it didn't exist before. 
module = instant.import_module(module_name, cache_dir=cache_dir) if module: debug("Reusing form from cache.") else: # Write a message log(INFO + 5, "Calling FFC just-in-time (JIT) compiler, this may take some time.") # Generate code compile_form(form, prefix=module_name, parameters=parameters) # Build module using Instant (through UFC) debug("Compiling and linking Python extension module, this may take some time.") hfile = module_name + ".h" cppfile = module_name + ".cpp" if parameters["cpp_optimize"]: cppargs = parameters["cpp_optimize_flags"].split() else: cppargs = ["-O0"] module = build_ufc_module( hfile, source_directory = os.curdir, signature = module_name, sources = [cppfile] if parameters["split"] else [], cppargs = cppargs, cache_dir = cache_dir) # Remove code if os.path.isfile(hfile): os.unlink(hfile) if parameters["split"] : if os.path.isfile(cppfile): os.unlink(cppfile) # Extract compiled form check_swig_version(module) compiled_form = _extract_form(module, module_name) return compiled_form, module, module_name def jit_element(element, parameters=None): "Just-in-time compile the given element" # FIXME: We need a new solution for this. 
# Check that we get an element if not isinstance(element, FiniteElementBase): error("Expecting a finite element.") # Create simplest possible dummy form v = TestFunction(element) ii = (0,)*v.rank() if element.family() == "Discontinuous Lagrange Trace": form = v[ii]*ds else: form = v[ii]*dx # Compile form compiled_form, module, prefix = jit_form(form, parameters) form_data = compute_form_data(form) return _extract_element_and_dofmap(module, prefix, form_data) def _check_parameters(form, parameters): "Check parameters and add any missing parameters" # Form can not be a list if isinstance(form, list): error("JIT compiler requires a single form (not a list of forms).") # Copy parameters if parameters is None: parameters = {} else: parameters = parameters.copy() # Add defaults for missing parameters for key in FFC_PARAMETERS_JIT: if not key in parameters: parameters[key] = FFC_PARAMETERS_JIT[key] # Don't postfix form names parameters["form_postfix"] = False return parameters def _extract_form(module, prefix): "Extract form from module." return getattr(module, prefix + "_form_0")() def _extract_element_and_dofmap(module, prefix, form_data): """ Extract element and dofmap from module. Code will be generated for all unique elements (including sub elements) and to get the top level element we need to extract the last element. """ i = len(form_data.unique_sub_elements) - 1 return (getattr(module, prefix + ("_finite_element_%d" % i))(), getattr(module, prefix + ("_dofmap_%d" % i))()) ffc-1.6.0/ffc/jitobject.py000066400000000000000000000070701255571034100153500ustar00rootroot00000000000000# Copyright (C) 2008-2013 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Martin Alnaes, 2013 # Python modules. from hashlib import sha1 # UFL modules. import ufl from ufl.utils.sorting import canonicalize_metadata # FFC modules. from ffc import __version__ as FFC_VERSION from ffc.parameters import compilation_relevant_parameters # UFC modules. from ffc.backends import ufc # Compute signature of all ufc headers combined ufc_signature = sha1(''.join(getattr(ufc, header) for header in (k for k in sorted(vars(ufc).keys()) if k.endswith("_header"))).encode('utf-8') ).hexdigest() class JITObject: """This class is a wrapper for a compiled object in the context of specific compiler parameters. A JITObject is identified either by its hash value or by its signature. The hash value is valid only in a single instance of an application (at runtime). 
The signature is persistent and may be used for caching modules on disk.""" def __init__(self, form, parameters): "Create JITObject for given form and parameters" assert(isinstance(form, ufl.Form)) # Store data self.form = form self.parameters = parameters self._hash = None self._signature = None def __hash__(self): "Return unique integer for form + parameters" # Check if we have computed the hash before if self._hash is None: # Compute hash based on signature self._hash = int(self.signature(), 16) return self._hash def __eq__(self, other): "Check for equality" return hash(self) == hash(other) def signature(self): "Return unique string for form + parameters" # Check if we have computed the signature before if not self._signature is None: return self._signature # Get signature from form form_signature = self.form.signature() # Compute other relevant signatures parameters_signature = _parameters_signature(self.parameters) ffc_signature = str(FFC_VERSION) # Build common signature signatures = [form_signature, parameters_signature, ffc_signature, ufc_signature] string = ";".join(signatures) self._signature = sha1(string.encode('utf-8')).hexdigest() # Uncomment for debugging #print "form_signature =", form_signature #print "parameters_signature =", parameters_signature #print "ffc_signature =", ffc_signature #print "signature =", self._signature return self._signature def _parameters_signature(parameters): "Return parameters signature (some parameters must be ignored)." parameters = compilation_relevant_parameters(parameters) return str(canonicalize_metadata(parameters)) ffc-1.6.0/ffc/log.py000066400000000000000000000043241255571034100141530ustar00rootroot00000000000000"""This module provides functions used by the FFC implementation to output messages. These may be redirected by the user of FFC. This module reuses the corresponding log.py module from UFL which is a wrapper for the standard Python logging module. 
""" # Copyright (C) 2009 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Kristian B. Oelgaard, 2009 # UFL modules from ufl.log import Logger from ufl.log import log_functions from ufl.log import INFO, DEBUG, ERROR, CRITICAL from ufl.common import dstr, tstr from ufl.utils.sorting import sorted_by_key # Create FFC logger ffc_logger = Logger("FFC") # Create FFC global log functions for foo in log_functions: exec("%s = lambda *message : ffc_logger.%s(*message)" % (foo, foo)) # Assertion, copied from UFL def ffc_assert(condition, *message): "Assert that condition is true and otherwise issue an error with given message." condition or error(*message) # Set default log level set_level(INFO) #--- Specialized FFC debugging tools --- def debug_dict(d, title=""): "Pretty-print dictionary." if not title: title = "Dictionary" info("") begin(title) info("") for (key, value) in sorted_by_key(d): info(key) info("-"*len(key)) info(str(value)) info("") end() def debug_ir(ir, name=""): "Debug intermediate representation." title = "Intermediate representation" if name: title += " (%s)" % str(name) debug_dict(ir, title) def debug_code(code, name=""): "Debug generated code." 
title = "Generated code" if name: title += " (%s)" % str(name) debug_dict(code, title) ffc-1.6.0/ffc/mixedelement.py000066400000000000000000000125131255571034100160510ustar00rootroot00000000000000# Copyright (C) 2005-2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Garth N. Wells, 2006-2009 # Modified by Marie E. Rognes, 2007-2010 # Modified by Kristian B. Oelgaard, 2010 # Modified by Lizao Li, 2015 # # Last changed: 2015-04-25 # Python modules import numpy # FFC modules from ffc.log import error # UFL utils from ufl.utils.sequences import product class MixedElement: "Create a FFC mixed element from a list of FFC/FIAT elements." 
def __init__(self, elements): self._elements = elements self._entity_dofs = _combine_entity_dofs(self._elements) def elements(self): return self._elements def space_dimension(self): return sum(e.space_dimension() for e in self._elements) def value_shape(self): # Values of Tensor elements are flattened in MixedElements num_comps = lambda x: numpy.prod(x) if x else 1 return (sum(num_comps(e.value_shape()) or 1 for e in self._elements),) def entity_dofs(self): return self._entity_dofs def mapping(self): return [m for e in self._elements for m in e.mapping()] def dual_basis(self): return [L for e in self._elements for L in e.dual_basis()] def num_components(self): return sum(_num_components(e) for e in self._elements) def tabulate(self, order, points): """ Tabulate values on mixed element by appropriately reordering the tabulated values for the nested elements. The table of values is organized as follows: D^a v_i[j](x_k) = table[a][i][j][k] where a is a multi-index (tuple) representing the derivative. For example, a = (1, 1) represents d/dx d/dy. """ # Special case: only one element # NOTE: KBO: Why do we need this special case? (FFC Bug #798578) # When calling tabulate() on a MixedElement one should be able to # rely on getting data back which is ordered like a mixed element # irrespective of the number of elements? 
# if len(self._elements) == 1: # return self._elements[0].tabulate(order, points) # Zeros for insertion into mixed table table_shape = (self.space_dimension(), self.num_components(), len(points)) # Iterate over elements and fill in non-zero values irange = (0, 0) crange = (0, 0) mixed_table = {} for element in self._elements: # Tabulate element table = element.tabulate(order, points) # Compute range for insertion into mixed table irange = (irange[1], irange[1] + element.space_dimension()) crange = (crange[1], crange[1] + _num_components(element)) # Insert table into mixed table for dtuple in table.keys(): # Insert zeros if necessary (should only happen first time) if not dtuple in mixed_table: # NOTE: It is super important to create a new numpy.zeros # instance to avoid manipulating a numpy reference in case # it is created outside the loop. mixed_table[dtuple] = numpy.zeros(table_shape) # Insert non-zero values if (crange[1] - crange[0]) > 1: mixed_table[dtuple][irange[0]:irange[1], crange[0]:crange[1]] = numpy.reshape(table[dtuple], (irange[1] - irange[0], crange[1] - crange[0], len(points))) else: mixed_table[dtuple][irange[0]:irange[1], crange[0]] = table[dtuple] return mixed_table #--- Utility functions --- def _combine_entity_dofs(elements): """ Combine the entity_dofs from a list of elements into a combined entity_dof containing the information for all the elements. """ # Return {} if no elements if not elements: return {} # Initialize entity_dofs dictionary entity_dofs = dict((key, {}) for key in elements[0].entity_dofs()) for dim in elements[0].entity_dofs(): for entity in elements[0].entity_dofs()[dim]: entity_dofs[dim][entity] = [] offset = 0 # Insert dofs from each element into the mixed entity_dof. 
for e in elements: dofs = e.entity_dofs() for dim in dofs: for entity in dofs[dim]: # Must take offset into account shifted_dofs = [v + offset for v in dofs[dim][entity]] # Insert dofs from this element into the entity_dofs entity_dofs[dim][entity] += shifted_dofs # Adjust offset offset += e.space_dimension() return entity_dofs def _num_components(element): "Compute number of components for element." return product(element.value_shape()) ffc-1.6.0/ffc/optimization.py000066400000000000000000000037201255571034100161170ustar00rootroot00000000000000""" Compiler stage 5: optimization ------------------------------ This module implements the optimization of an intermediate code representation. """ # Copyright (C) 2009-2013 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Martin Alnaes, 2013 # # First added: 2009-12-22 # Last changed: 2013-02-10 # FFC modules from ffc.log import info, begin, end from ffc.representation import pick_representation def optimize_ir(ir, parameters): "Optimize intermediate form representation." 
begin("Compiler stage 3: Optimizing intermediate representation") # Check if optimization is requested if not parameters["optimize"]: info("Skipping optimizations, add -O to optimize") end() return ir # Extract representations ir_elements, ir_dofmaps, ir_integrals, ir_forms = ir # Iterate over integrals oir_integrals = [_optimize_integral_ir(ir, parameters) for ir in ir_integrals] end() return ir_elements, ir_dofmaps, oir_integrals, ir_forms def _optimize_integral_ir(ir, parameters): "Compute optimized intermediate represention of integral." # Select representation r = pick_representation(ir["representation"]) # Optimize representation (if available for representation) try: oir = r.optimize_integral_ir(ir, parameters) return oir except: return ir ffc-1.6.0/ffc/parameters.py000066400000000000000000000062261255571034100155400ustar00rootroot00000000000000# Copyright (C) 2005-2015 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
from ffc.log import INFO FFC_PARAMETERS = { "format": "ufc", # code generation format "representation": "auto", # form representation / code # generation strategy "quadrature_rule": "auto", # quadrature rule used for # integration of element tensors "quadrature_degree": "auto", # quadrature degree used for # computing integrals "precision": 15, # precision used when writing # numbers "epsilon": 1e-14, # machine precision, used for # dropping zero terms "split": False, # split generated code into .h and # .cpp file "form_postfix": True, # postfix form name with "Function", # "LinearForm" or BilinearForm "convert_exceptions_to_warnings": False, # convert all exceptions to warning # in generated code "cache_dir": "", # cache dir used by Instant "output_dir": ".", # output directory for generated # code "cpp_optimize": True, # optimization for the JIT compiler "cpp_optimize_flags": "-O2", # optimization flags for the JIT compiler "optimize": False, # optimise the code generation "restrict_keyword": "", # compiler specific "__restrict" or "__restrict__" keyword "log_level": INFO, # log level, displaying only # messages with level >= log_level "log_prefix": "", # log prefix "error_control": False, # with error control } def default_parameters(): "Return (a copy of) the default parameter values for FFC." return FFC_PARAMETERS.copy() def compilation_relevant_parameters(parameters): parameters = parameters.copy() ignores = ["log_prefix", "log_level", "cache_dir", "output_dir"] for ignore in ignores: assert ignore in FFC_PARAMETERS if ignore in parameters: del parameters[ignore] return parameters ffc-1.6.0/ffc/plot.py000066400000000000000000000717421255571034100143600ustar00rootroot00000000000000"This module provides functionality for plotting finite elements." # Copyright (C) 2010 Anders Logg # # This file is part of FFC. 
# # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2010-12-07 # Last changed: 2010-12-15 __all__ = ["plot"] from numpy import dot, cross, array, sin, cos, pi, sqrt from numpy.linalg import norm import sys from ffc.fiatinterface import create_element from ffc.log import warning, error, info # Import Soya3D try: import soya from soya.sphere import Sphere from soya.label3d import Label3D from soya.sdlconst import QUIT _soya_imported = True except: _soya_imported = False # Colors for elements element_colors = {"Argyris": (0.45, 0.70, 0.80), "Arnold-Winther": (0.00, 0.00, 1.00), "Brezzi-Douglas-Marini": (1.00, 1.00, 0.00), "Crouzeix-Raviart": (1.00, 0.25, 0.25), "Discontinuous Lagrange": (0.00, 0.25, 0.00), "Discontinuous Raviart-Thomas": (0.90, 0.90, 0.30), "Hermite": (0.50, 1.00, 0.50), "Lagrange": (0.00, 1.00, 0.00), "Mardal-Tai-Winther": (1.00, 0.10, 0.90), "Morley": (0.40, 0.40, 0.40), "Nedelec 1st kind H(curl)": (0.90, 0.30, 0.00), "Nedelec 2nd kind H(curl)": (0.70, 0.20, 0.00), "Raviart-Thomas": (0.90, 0.60, 0.00)} def plot(element, rotate=True): "Plot finite element." 
# Check if Soya3D has been imported if not _soya_imported: warning("Unable to plot element, Soya3D not available (install package python-soya).") return # Special case: plot dof notation if element == "notation": # Create model for notation notation = create_notation_models() # Render plot window render(notation, "Notation", 0, True, rotate) else: # Create cell model cell, is3d = create_cell_model(element) cellname = element.cell().cellname() # Assuming single cell # Create dof models dofs, num_moments = create_dof_models(element) # Create title if element.degree() is not None: title = "%s of degree %d on a %s" % (element.family(), element.degree(), cellname) else: title = "%s on a %s" % (element.family(), cellname) # Render plot window render([cell] + dofs, title, num_moments, is3d, rotate) def render(models, title, num_moments, is3d, rotate): "Render given list of models." # Note that we view from the positive z-axis, and not from the # negative y-axis. This should make no difference since the # element dofs are symmetric anyway and it plays better with # the default camera settings in Soya. 
# Initialize Soya soya.init(title) # Create scene scene = soya.World() scene.atmosphere = soya.Atmosphere() if title == "Notation": scene.atmosphere.bg_color = (0.0, 1.0, 0.0, 1.0) else: scene.atmosphere.bg_color = (1.0, 1.0, 1.0, 1.0) # Not used, need to manually handle rotation #label = Label3D(scene, text=str(num_moments), size=0.005) #label.set_xyz(1.0, 1.0, 1.0) #label.set_color((0.0, 0.0, 0.0, 1.0)) # Define rotation if is3d: class RotatingBody(soya.Body): def advance_time(self, proportion): self.rotate_y(2.0 * proportion) else: class RotatingBody(soya.Body): def advance_time(self, proportion): self.rotate_z(2.0 * proportion) # Select type of display, rotating or not if rotate: Body = RotatingBody else: Body = soya.Body # Add all models for model in models: body = Body(scene, model) # Set light light = soya.Light(scene) if is3d: light.set_xyz(1.0, 5.0, 5.0) else: light.set_xyz(0.0, 0.0, 1.0) light.cast_shadow = 1 light.shadow_color = (0.0, 0.0, 0.0, 0.5) # Set camera camera = soya.Camera(scene) camera.ortho = 0 p = camera.position() if is3d: if rotate: camera.set_xyz(-20, 10, 50.0) camera.fov = 2.1 p.set_xyz(0.0, 0.4, 0.0) else: camera.set_xyz(-20, 10, 50.0) camera.fov = 1.6 p.set_xyz(0.3, 0.42, 0.5) else: if rotate: camera.set_xyz(0, 10, 50.0) camera.fov = 2.6 p.set_xyz(0.0, 0.0, 0.0) else: camera.set_xyz(0, 10, 50.0) camera.fov = 1.7 p.set_xyz(0.5, 0.4, 0.0) camera.look_at(p) soya.set_root_widget(camera) # Handle exit class Idler(soya.Idler): def end_round(self): for event in self.events: if event[0] == QUIT: print("Closing plot, bye bye") sys.exit(0) # Main loop idler = Idler(scene) idler.idle() def tangents(n): "Return normalized tangent vectors for plane defined by given vector." 
# Figure out which vector to take cross product with eps = 1e-10 e = array((1.0, 0.0, 0.0)) if norm(cross(n, e)) < eps: e = array((0.0, 1.0, 0.0)) # Take cross products and normalize t0 = cross(n, e) t0 = t0 / norm(t0) t1 = cross(n, t0) t1 = t1 / norm(t0) return t0, t1 def Cylinder(scene, p0, p1, r, color=(0.0, 0.0, 0.0, 1.0)): "Return model for cylinder from p0 to p1 with radius r." # Convert to NumPy array if isinstance(p0, soya.Vertex): p0 = array((p0.x, p0.y, p0.z)) p1 = array((p1.x, p1.y, p1.z)) else: p0 = array(p0) p1 = array(p1) # Get tangent vectors for plane n = p0 - p1 n = n / norm(n) t0, t1 = tangents(n) # Traverse the circles num_steps = 10 dtheta = 2.0*pi / float(num_steps) for i in range(num_steps): # Compute coordinates for square dx0 = cos(i*dtheta)*t0 + sin(i*dtheta)*t1 dx1 = cos((i + 1)*dtheta)*t0 + sin((i + 1)*dtheta)*t1 x0 = p0 + r*dx0 x1 = p0 + r*dx1 x2 = p1 + r*dx0 x3 = p1 + r*dx1 # Cover square by two triangles v0 = soya.Vertex(scene, x0[0], x0[1], x0[2], diffuse=color) v1 = soya.Vertex(scene, x1[0], x1[1], x1[2], diffuse=color) v2 = soya.Vertex(scene, x2[0], x2[1], x2[2], diffuse=color) v3 = soya.Vertex(scene, x3[0], x3[1], x3[2], diffuse=color) f0 = soya.Face(scene, (v0, v1, v2)) f1 = soya.Face(scene, (v1, v2, v3)) f0.double_sided = 1 f1.double_sided = 1 # Extract model model = scene.to_model() return model def Cone(scene, p0, p1, r, color=(0.0, 0.0, 0.0, 1.0)): "Return model for cone from p0 to p1 with radius r." 
# Convert to NumPy array if isinstance(p0, soya.Vertex): p0 = array((p0.x, p0.y, p0.z)) p1 = array((p1.x, p1.y, p1.z)) else: p0 = array(p0) p1 = array(p1) # Get tangent vectors for plane n = p0 - p1 n = n / norm(n) t0, t1 = tangents(n) # Traverse the circles num_steps = 10 dtheta = 2.0*pi / float(num_steps) v2 = soya.Vertex(scene, p1[0], p1[1], p1[2], diffuse=color) for i in range(num_steps): # Compute coordinates for bottom of face dx0 = cos(i*dtheta)*t0 + sin(i*dtheta)*t1 dx1 = cos((i + 1)*dtheta)*t0 + sin((i + 1)*dtheta)*t1 x0 = p0 + r*dx0 x1 = p0 + r*dx1 # Create face v0 = soya.Vertex(scene, x0[0], x0[1], x0[2], diffuse=color) v1 = soya.Vertex(scene, x1[0], x1[1], x1[2], diffuse=color) f = soya.Face(scene, (v0, v1, v2)) f.double_sided = 1 # Extract model model = scene.to_model() return model def Arrow(scene, x, n, center=False): "Return model for arrow from x in direction n." # Convert to Numpy arrays x = array(x) n = array(n) # Get tangents t0, t1 = tangents(n) # Dimensions for arrow L = 0.3 l = 0.35*L r = 0.04*L R = 0.125*L # Center arrow if center: print("Centering!") x -= 0.5*(L + l)*n # Create cylinder and cone cylinder = Cylinder(scene, x, x + L*n, r) cone = Cone(scene, x + L*n, x + (L + l)*n, R) # Extract model return scene.to_model() def UnitTetrahedron(color=(0.0, 1.0, 0.0, 0.5)): "Return model for unit tetrahedron." 
info("Plotting unit tetrahedron") # Create separate scene (since we will extract a model, not render) scene = soya.World() # Create vertices v0 = soya.Vertex(scene, 0.0, 0.0, 0.0, diffuse=color) v1 = soya.Vertex(scene, 1.0, 0.0, 0.0, diffuse=color) v2 = soya.Vertex(scene, 0.0, 1.0, 0.0, diffuse=color) v3 = soya.Vertex(scene, 0.0, 0.0, 1.0, diffuse=color) # Create edges e0 = Cylinder(scene, v0, v1, 0.007) e1 = Cylinder(scene, v0, v2, 0.007) e2 = Cylinder(scene, v0, v3, 0.007) e3 = Cylinder(scene, v1, v2, 0.007) e4 = Cylinder(scene, v1, v3, 0.007) e5 = Cylinder(scene, v2, v3, 0.007) # Create faces f0 = soya.Face(scene, (v1, v2, v3)) f1 = soya.Face(scene, (v0, v2, v3)) f2 = soya.Face(scene, (v0, v1, v3)) f3 = soya.Face(scene, (v0, v1, v2)) # Make faces double sided f0.double_sided = 1 f1.double_sided = 1 f2.double_sided = 1 f3.double_sided = 1 # Extract model model = scene.to_model() return model def UnitTriangle(color=(0.0, 1.0, 0.0, 0.5)): "Return model for unit tetrahedron." info("Plotting unit triangle") # Create separate scene (since we will extract a model, not render) scene = soya.World() # Create vertice v0 = soya.Vertex(scene, 0.0, 0.0, 0.0, diffuse=color) v1 = soya.Vertex(scene, 1.0, 0.0, 0.0, diffuse=color) v2 = soya.Vertex(scene, 0.0, 1.0, 0.0, diffuse=color) # Create edges e0 = Cylinder(scene, v0, v1, 0.007) e1 = Cylinder(scene, v0, v2, 0.007) e2 = Cylinder(scene, v1, v2, 0.007) # Create face f = soya.Face(scene, (v0, v1, v2)) # Make face double sided f.double_sided = 1 # Extract model model = scene.to_model() return model def PointEvaluation(x): "Return model for point evaluation at given point." 
info("Plotting dof: point evaluation at x = %s" % str(x)) # Make sure point is 3D x = to3d(x) # Create separate scene (since we will extract a model, not render) scene = soya.World() # Define material (color) for the sphere material = soya.Material() material.diffuse = (0.0, 0.0, 0.0, 1.0) # Create sphere sphere = Sphere(scene, material=material) # Scale and moveand move to coordinate sphere.scale(0.05, 0.05, 0.05) p = sphere.position() p.set_xyz(x[0], x[1], x[2]) sphere.move(p) # Extract model model = scene.to_model() return model def PointDerivative(x): "Return model for evaluation of derivatives at given point." info("Plotting dof: point derivative at x = %s" % str(x)) # Make sure point is 3D x = to3d(x) # Create separate scene (since we will extract a model, not render) scene = soya.World() # Define material (color) for the sphere material = soya.Material() material.diffuse = (0.0, 0.0, 0.0, 0.2) # Create sphere sphere = Sphere(scene, material=material) # Scale and moveand move to coordinate sphere.scale(0.1, 0.1, 0.1) p = sphere.position() p.set_xyz(x[0], x[1], x[2]) sphere.move(p) # Extract model model = scene.to_model() return model def PointSecondDerivative(x): "Return model for evaluation of second derivatives at given point." info("Plotting dof: point derivative at x = %s" % str(x)) # Make sure point is 3D x = to3d(x) # Create separate scene (since we will extract a model, not render) scene = soya.World() # Define material (color) for the sphere material = soya.Material() material.diffuse = (0.0, 0.0, 0.0, 0.05) # Create sphere sphere = Sphere(scene, material=material) # Scale and moveand move to coordinate sphere.scale(0.15, 0.15, 0.15) p = sphere.position() p.set_xyz(x[0], x[1], x[2]) sphere.move(p) # Extract model model = scene.to_model() return model def DirectionalEvaluation(x, n, flip=False, center=False): "Return model for directional evaluation at given point in given direction." 
info("Plotting dof: directional evaluation at x = %s in direction n = %s" % (str(x), str(n))) # Make sure points are 3D x = to3d(x) n = to3d(n) # Create separate scene (since we will extract a model, not render) scene = soya.World() # Normalize n = array(n) n = 0.75 * n / norm(n) # Flip normal if necessary if flip and not pointing_outwards(x, n): info("Flipping direction of arrow so it points outward.") n = -n # Create arrow arrow = Arrow(scene, x, n, center) # Extract model model = scene.to_model() return model def DirectionalDerivative(x, n): "Return model for directional derivative at given point in given direction." info("Plotting dof: directional derivative at x = %s in direction n = %s" % (str(x), str(n))) # Make sure points are 3D x = to3d(x) n = to3d(n) # Create separate scene (since we will extract a model, not render) scene = soya.World() # Normalize n = array(n) n = 0.75 * n / norm(n) # Create line line = Cylinder(scene, x - 0.07*n, x + 0.07*n, 0.005) # Extract model model = scene.to_model() return model def IntegralMoment(cellname, num_moments, x=None): "Return model for integral moment for given element." 
info("Plotting dof: integral moment") # Set position if x is None and cellname == "triangle": a = 1.0 / (2 + sqrt(2)) # this was a fun exercise x = (a, a, 0.0) elif x is None: a = 1.0 / (3 + sqrt(3)) # so was this x = (a, a, a) # Make sure point is 3D x = to3d(x) # Fancy scaling of radius and color r = 1.0 / (num_moments + 5) if num_moments % 2 == 0: c = 1.0 else: c = 0.0 # Create separate scene (since we will extract a model, not render) scene = soya.World() # Define material (color) for the sphere material = soya.Material() material.diffuse = (c, c, c, 0.7) # Create sphere sphere = Sphere(scene, material=material) # Scale and moveand move to coordinate sphere.scale(r, r, r) p = sphere.position() p.set_xyz(x[0], x[1], x[2]) sphere.move(p) # Extract model model = scene.to_model() return model def create_cell_model(element): "Create Soya3D model for cell." # Get color family = element.family() if not family in element_colors: warning("Don't know a good color for elements of type '%s', using default color." % family) family = "Lagrange" color = element_colors[family] color = (color[0], color[1], color[2], 0.7) # Create model based on cell type cellname = element.cell().cellname() if cellname == "triangle": return UnitTriangle(color), False elif cellname == "tetrahedron": return UnitTetrahedron(color), True error("Unable to plot element, unhandled cell type: %s" % str(cellname)) def create_dof_models(element): "Create Soya3D models for dofs." 
# Flags for whether to flip and center arrows directional = {"PointScaledNormalEval": (True, False), "PointEdgeTangent": (False, True), "PointFaceTangent": (False, True)} # Elements not supported fully by FIAT unsupported = {"Argyris": argyris_dofs, "Arnold-Winther": arnold_winther_dofs, "Hermite": hermite_dofs, "Mardal-Tai-Winther": mardal_tai_winther_dofs, "Morley": morley_dofs} # Check if element is supported family = element.family() if not family in unsupported: # Create FIAT element and get dofs fiat_element = create_element(element) dofs = [(dof.get_type_tag(), dof.get_point_dict()) for dof in fiat_element.dual_basis()] else: # Bybass FIAT and set the dofs ourselves dofs = unsupported[family](element) # Iterate over dofs and add models models = [] num_moments = 0 for (dof_type, L) in dofs: # Check type of dof if dof_type == "PointEval": # Point evaluation, just get point points = list(L.keys()) if not len(points) == 1: error("Strange dof, single point expected.") x = points[0] # Generate model models.append(PointEvaluation(x)) elif dof_type == "PointDeriv": # Evaluation of derivatives at point points = list(L.keys()) if not len(points) == 1: error("Strange dof, single point expected.") x = points[0] # Generate model models.append(PointDerivative(x)) elif dof_type == "PointSecondDeriv": # Evaluation of derivatives at point points = list(L.keys()) if not len(points) == 1: error("Strange dof, single point expected.") x = points[0] # Generate model models.append(PointSecondDerivative(x)) elif dof_type in directional: # Normal evaluation, get point and normal points = list(L.keys()) if not len(points) == 1: error("Strange dof, single point expected.") x = points[0] n = [xx[0] for xx in L[x]] # Generate model flip, center = directional[dof_type] models.append(DirectionalEvaluation(x, n, flip, center)) elif dof_type == "PointNormalDeriv": # Evaluation of derivatives at point points = list(L.keys()) if not len(points) == 1: error("Strange dof, single point 
expected.") x = points[0] n = [xx[0] for xx in L[x]] # Generate model models.append(DirectionalDerivative(x, n)) elif dof_type in ("FrobeniusIntegralMoment", "IntegralMoment", "ComponentPointEval"): # Generate model models.append(IntegralMoment(element.cell().cellname(), num_moments)) # Count the number of integral moments num_moments += 1 else: error("Unable to plot dof, unhandled dof type: %s" % str(dof_type)) return models, num_moments def create_notation_models(): "Create Soya 3D models for notation." models = [] y = 1.3 dy = -0.325 # Create model for evaluation models.append(PointEvaluation([0, y])) y += dy # Create model for derivative evaluation models.append(PointDerivative([0, y])) models.append(PointDerivative([0, y])) models.append(PointDerivative([0, y])) y += dy # Create model for second derivative evaluation models.append(PointSecondDerivative([0, y])) models.append(PointSecondDerivative([0, y])) models.append(PointSecondDerivative([0, y])) y += dy # Create model for directional evaluation models.append(DirectionalEvaluation([0, y], [1, 1], False, True)) y += dy # Create model for directional evaluation models.append(DirectionalDerivative([0, y], [1, 1])) y += dy # Create model for integral moments models.append(IntegralMoment("tetrahedron", 0, [0, y])) models.append(IntegralMoment("tetrahedron", 1, [0, y])) models.append(IntegralMoment("tetrahedron", 2, [0, y])) return models def pointing_outwards(x, n): "Check if n is pointing inwards, used for flipping dofs." eps = 1e-10 x = array(x) + 0.1*array(n) return x[0] < -eps or x[1] < -eps or x[2] < -eps or x[2] > 1.0 - x[0] - x[1] + eps def to3d(x): "Make sure point is 3D." if len(x) == 2: x = (x[0], x[1], 0.0) return x def arnold_winther_dofs(element): "Special fix for Arnold-Winther elements until Rob fixes in FIAT." 
if not element.cell().cellname() == "triangle": error("Unable to plot element, only know how to plot Mardal-Tai-Winther on triangles.") return [("PointEval", {(0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times ("PointEval", {(0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times ("PointEval", {(0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times ("PointEval", {(1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times ("PointEval", {(1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times ("PointEval", {(1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times ("PointEval", {(0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof three times ("PointEval", {(0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof three times ("PointEval", {(0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof three times ("PointScaledNormalEval", {(1.0/5, 0.0): [ (0.0, (0,)), (-1.0, (1,))]}), ("PointScaledNormalEval", {(2.0/5, 0.0): [ (0.0, (0,)), (-1.0, (1,))]}), ("PointScaledNormalEval", {(3.0/5, 0.0): [ (0.0, (0,)), (-1.0, (1,))]}), ("PointScaledNormalEval", {(4.0/5, 0.0): [ (0.0, (0,)), (-1.0, (1,))]}), ("PointScaledNormalEval", {(4.0/5, 1.0/5.0): [ (1.0, (0,)), (1.0, (1,))]}), ("PointScaledNormalEval", {(3.0/5, 2.0/5.0): [ (1.0, (0,)), (1.0, (1,))]}), ("PointScaledNormalEval", {(2.0/5, 3.0/5.0): [ (1.0, (0,)), (1.0, (1,))]}), ("PointScaledNormalEval", {(1.0/5, 4.0/5.0): [ (1.0, (0,)), (1.0, (1,))]}), ("PointScaledNormalEval", {(0.0, 1.0/5.0): [ (-1.0, (0,)), (0.0, (1,))]}), ("PointScaledNormalEval", {(0.0, 2.0/5.0): [ (-1.0, (0,)), (0.0, (1,))]}), ("PointScaledNormalEval", {(0.0, 3.0/5.0): [ (-1.0, (0,)), (0.0, (1,))]}), ("PointScaledNormalEval", {(0.0, 4.0/5.0): [ (-1.0, (0,)), (0.0, (1,))]}), ("IntegralMoment", None), ("IntegralMoment", None), ("IntegralMoment", None)] def argyris_dofs(element): "Special fix for Hermite elements until Rob fixes in FIAT." 
if not element.degree() == 5: error("Unable to plot element, only know how to plot quintic Argyris elements.") if not element.cell().cellname() == "triangle": error("Unable to plot element, only know how to plot Argyris on triangles.") return [("PointEval", {(0.0, 0.0): [ (1.0, ()) ]}), ("PointEval", {(1.0, 0.0): [ (1.0, ()) ]}), ("PointEval", {(0.0, 1.0): [ (1.0, ()) ]}), ("PointDeriv", {(0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof twice ("PointDeriv", {(0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof twice ("PointDeriv", {(1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof twice ("PointDeriv", {(1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof twice ("PointDeriv", {(0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof twice ("PointDeriv", {(0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof twice ("PointSecondDeriv", {(0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times ("PointSecondDeriv", {(0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times ("PointSecondDeriv", {(0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times ("PointSecondDeriv", {(1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times ("PointSecondDeriv", {(1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times ("PointSecondDeriv", {(1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times ("PointSecondDeriv", {(0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof three times ("PointSecondDeriv", {(0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof three times ("PointSecondDeriv", {(0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof three times ("PointNormalDeriv", {(0.5, 0.0): [ (0.0, (0,)), (-1.0, (1,))]}), ("PointNormalDeriv", {(0.5, 0.5): [ (1.0, (0,)), ( 1.0, (1,))]}), ("PointNormalDeriv", {(0.0, 0.5): [(-1.0, (0,)), ( 0.0, (1,))]})] def hermite_dofs(element): "Special fix for Hermite elements until Rob fixes in FIAT." 
dofs_2d = [("PointEval", {(0.0, 0.0): [ (1.0, ()) ]}), ("PointEval", {(1.0, 0.0): [ (1.0, ()) ]}), ("PointEval", {(0.0, 1.0): [ (1.0, ()) ]}), ("PointDeriv", {(0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof twice ("PointDeriv", {(0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof twice ("PointDeriv", {(1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof twice ("PointDeriv", {(1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof twice ("PointDeriv", {(0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof twice ("PointDeriv", {(0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof twice ("PointEval", {(1.0/3, 1.0/3): [ (1.0, ()) ]})] dofs_3d = [("PointEval", {(0.0, 0.0, 0.0): [ (1.0, ()) ]}), ("PointEval", {(1.0, 0.0, 0.0): [ (1.0, ()) ]}), ("PointEval", {(0.0, 1.0, 0.0): [ (1.0, ()) ]}), ("PointEval", {(0.0, 0.0, 1.0): [ (1.0, ()) ]}), ("PointDeriv", {(0.0, 0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times ("PointDeriv", {(0.0, 0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times ("PointDeriv", {(0.0, 0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times ("PointDeriv", {(1.0, 0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times ("PointDeriv", {(1.0, 0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times ("PointDeriv", {(1.0, 0.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times ("PointDeriv", {(0.0, 1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times ("PointDeriv", {(0.0, 1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times ("PointDeriv", {(0.0, 1.0, 0.0): [ (1.0, ()) ]}), # hack, same dof three times ("PointDeriv", {(0.0, 0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof three times ("PointDeriv", {(0.0, 0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof three times ("PointDeriv", {(0.0, 0.0, 1.0): [ (1.0, ()) ]}), # hack, same dof three times ("PointEval", {(1.0/3, 1.0/3, 1.0/3): [ (1.0, ()) ]}), ("PointEval", {(0.0, 1.0/3, 1.0/3): [ (1.0, ()) ]}), ("PointEval", {(1.0/3, 0.0, 1.0/3): [ (1.0, ()) ]}), ("PointEval", {(1.0/3, 1.0/3, 0.0): [ (1.0, ()) ]})] if element.cell().cellname() == 
"triangle": return dofs_2d else: return dofs_3d def mardal_tai_winther_dofs(element): "Special fix for Mardal-Tai-Winther elements until Rob fixes in FIAT." if not element.cell().cellname() == "triangle": error("Unable to plot element, only know how to plot Mardal-Tai-Winther on triangles.") return [("PointScaledNormalEval", {(1.0/3, 0.0): [ (0.0, (0,)), (-1.0, (1,))]}), ("PointScaledNormalEval", {(2.0/3, 0.0): [ (0.0, (0,)), (-1.0, (1,))]}), ("PointScaledNormalEval", {(2.0/3, 1.0/3.0): [ (1.0, (0,)), (1.0, (1,))]}), ("PointScaledNormalEval", {(1.0/3, 2.0/3.0): [ (1.0, (0,)), (1.0, (1,))]}), ("PointScaledNormalEval", {(0.0, 1.0/3.0): [ (-1.0, (0,)), (0.0, (1,))]}), ("PointScaledNormalEval", {(0.0, 2.0/3.0): [ (-1.0, (0,)), (0.0, (1,))]}), ("PointEdgeTangent", {(0.5, 0.0): [ (-1.0, (0,)), (0.0, (1,))]}), ("PointEdgeTangent", {(0.5, 0.5): [ (-1.0, (0,)), (1.0, (1,))]}), ("PointEdgeTangent", {(0.0, 0.5): [ (0.0, (0,)), (-1.0, (1,))]})] def morley_dofs(element): "Special fix for Morley elements until Rob fixes in FIAT." 
if not element.cell().cellname() == "triangle": error("Unable to plot element, only know how to plot Morley on triangles.") return [("PointEval", {(0.0, 0.0): [ (1.0, ()) ]}), ("PointEval", {(1.0, 0.0): [ (1.0, ()) ]}), ("PointEval", {(0.0, 1.0): [ (1.0, ()) ]}), ("PointNormalDeriv", {(0.5, 0.0): [ (0.0, (0,)), (-1.0, (1,))]}), ("PointNormalDeriv", {(0.5, 0.5): [ (1.0, (0,)), ( 1.0, (1,))]}), ("PointNormalDeriv", {(0.0, 0.5): [(-1.0, (0,)), ( 0.0, (1,))]})] ffc-1.6.0/ffc/quadrature/000077500000000000000000000000001255571034100151725ustar00rootroot00000000000000ffc-1.6.0/ffc/quadrature/__init__.py000066400000000000000000000002531255571034100173030ustar00rootroot00000000000000from .quadraturerepresentation import compute_integral_ir from .quadratureoptimization import optimize_integral_ir from .quadraturegenerator import generate_integral_code ffc-1.6.0/ffc/quadrature/expr.py000066400000000000000000000112071255571034100165230ustar00rootroot00000000000000"This file implements a base class to represent an expression." # Copyright (C) 2009-2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2009-08-08 # Last changed: 2010-01-21 # FFC quadrature modules. from .symbolics import create_float class Expr(object): __slots__ = ("val", "t", "_prec", "_repr", "_hash") def __init__(self): """An Expr object contains: val - float, holds value of object. 
t - Type (int), one of CONST, GEO, IP, BASIS. _prec - int, precedence which is used for comparison and comparing classes. _repr - str, string value of __repr__(), we only compute it once. _hash - int, hash value of __hash__(), we only compute it once. The constructor is empty, so initialisation of variables are left to child classes.""" pass # Representation of the expression. def __repr__(self): "Representation of the expression for comparison and debugging." return self._repr # Hash. def __hash__(self): "Hash (for lookup in {})." return self._hash # Comparison. def __eq__(self, other): "==, True if representations are equal." if isinstance(other, Expr): return self._repr == other._repr return False def __ne__(self, other): "!=, True if representations are not equal." if isinstance(other, Expr): return self._repr != other._repr return True def __lt__(self, other): """<, compare precedence and _repr if two objects have the same precedence.""" if not isinstance(other, Expr): return False if self._prec < other._prec: return True elif self._prec == other._prec: return self._repr < other._repr return False def __gt__(self, other): ">, opposite of __lt__." if not isinstance(other, Expr): return True if self._prec > other._prec: return True elif self._prec == other._prec: return self._repr > other._repr return False # Public functions (for FloatValue, other classes should overload as needed) def expand(self): """Expand the expression. (FloatValue and Symbol are expanded by construction).""" # Nothing to be done. return self def get_unique_vars(self, var_type): "Get unique variables (Symbols) as a set." # A float is not a variable. return set() def get_var_occurrences(self): """Determine the number of times all variables occurs in the expression. Returns a dictionary of variables and the number of times they occur. Works for FloatValue and Symbol.""" # There is only one float value (if it is not -1 or 1). 
if self.val == 1.0 or self.val == -1.0: return {} return {self:1} def ops(self): """Return number of operations to compute the expression. This is always zero for a FloatValue.""" # Just return 0. # NOTE: This means that minus in e.g., -2 and -2*x is not counted. return 0 def reduce_ops(self): """Reduce number of operations to evaluate the expression. There is nothing to be done for FloatValue and Symbol.""" # Nothing to be done. return self def reduce_var(self, var): """Reduce the expression by another variable by using division. This works for FloatValue, Symbol and Product.""" return self/var def reduce_vartype(self, var_type): """Reduce expression with given var_type. It returns a tuple (found, remain), where 'found' is an expression that only has variables of type == var_type. If no variables are found, found=(). The 'remain' part contains the leftover after division by 'found' such that: self = found*remain. Works for FloatValue and Symbol.""" if self.t == var_type: return [(self, create_float(1))] return [((), self)] ffc-1.6.0/ffc/quadrature/floatvalue.py000066400000000000000000000133411255571034100177100ustar00rootroot00000000000000"This file implements a class to represent a float." # Copyright (C) 2009-2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2009-07-12 # Last changed: 2010-02-09 # FFC modules. 
from ffc.log import error from ffc.cpp import format # FFC quadrature modules. from .symbolics import CONST #from symbolics import format from .symbolics import create_float from .symbolics import create_product from .symbolics import create_sum from .symbolics import create_fraction from .expr import Expr class FloatValue(Expr): def __init__(self, value): """Initialise a FloatValue object, it derives from Expr and contains no additional variables. NOTE: self._prec = 0.""" # Initialise value, type and class. self.val = float(value) self.t = CONST self._prec = 0 # Handle 0.0, 1.0 and -1.0 values explicitly. EPS = format["epsilon"] if abs(value) < EPS: self.val = 0.0 elif abs(value - 1.0) < EPS: self.val = 1.0 elif abs(value + 1.0) < EPS: self.val = -1.0 # Compute the representation now, such that we can use it directly # in the __eq__ and __ne__ methods (improves performance a bit, but # only when objects are cached). self._repr = "FloatValue(%s)" % format["float"](self.val) # Use repr as hash value self._hash = hash(self._repr) # Print function. def __str__(self): "Simple string representation which will appear in the generated code." return format["float"](self.val) # Binary operators. def __add__(self, other): "Addition by other objects." # NOTE: We expect expanded objects here. # This is only well-defined if other is a float or if self.val == 0. if other._prec == 0: # float return create_float(self.val+other.val) elif self.val == 0.0: return other # Return a new sum return create_sum([self, other]) def __sub__(self, other): "Subtract other objects." # NOTE: We expect expanded objects here. if other._prec == 0: # float return create_float(self.val-other.val) # Multiply other by -1 elif self.val == 0.0: return create_product([create_float(-1), other]) # Return a new sum where other is multiplied by -1 return create_sum([self, create_product([create_float(-1), other])]) def __mul__(self, other): "Multiplication by other objects." 
# NOTE: We expect expanded objects here i.e., Product([FloatValue]) # should not be present. # Only handle case where other is a float, else let the other # object handle the multiplication. if other._prec == 0: # float return create_float(self.val*other.val) return other.__mul__(self) def __truediv__(self, other): "Division by other objects." # If division is illegal (this should definitely not happen). if other.val == 0.0: error("Division by zero") # TODO: Should we also support division by fraction for generality? # It should not be needed by this module. if other._prec == 4: # frac error("Did not expected to divide by fraction") # If fraction will be zero. if self.val == 0.0: return self # NOTE: We expect expanded objects here i.e., Product([FloatValue]) # should not be present. # Handle types appropriately. if other._prec == 0: # float return create_float(self.val/other.val) # If other is a symbol, return a simple fraction. elif other._prec == 1: # sym return create_fraction(self, other) # Don't handle division by sum. elif other._prec == 3: # sum # TODO: Here we could do: 4 / (2*x + 4*y) -> 2/(x + 2*y). return create_fraction(self, other) # If other is a product, remove any float value to avoid # 4 / (2*x), this will return 2/x. val = 1.0 for v in other.vrs: if v._prec == 0: # float val *= v.val # If we had any floats, create new numerator and only use 'real' variables # from the product in the denominator. if val != 1.0: # Check if we need to create a new denominator. # TODO: Just use other.vrs[1:] instead. if len(other.get_vrs()) > 1: return create_fraction(create_float(self.val/val), create_product(other.get_vrs())) # TODO: Because we expect all products to be expanded we shouldn't need # to check for this case, just use other.vrs[1]. elif len(other.get_vrs()) == 1: return create_fraction(create_float(self.val/val), other.vrs[1]) error("No variables left in denominator") # Nothing left to do. 
return create_fraction(self, other) __div__ = __truediv__ # FFC quadrature modules. from .symbol import Symbol from .product import Product from .sumobj import Sum from .fraction import Fraction ffc-1.6.0/ffc/quadrature/fraction.py000066400000000000000000000357131255571034100173620ustar00rootroot00000000000000"This file implements a class to represent a fraction." # Copyright (C) 2009-2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2009-07-12 # Last changed: 2010-02-09 # FFC modules. from ffc.log import error from ffc.cpp import format # FFC quadrature modules. from .symbolics import create_float from .symbolics import create_product from .symbolics import create_sum from .symbolics import create_fraction from .expr import Expr class Fraction(Expr): __slots__ = ("num", "denom", "_expanded", "_reduced") def __init__(self, numerator, denominator): """Initialise a Fraction object, it derives from Expr and contains the additional variables: num - expr, the numerator. denom - expr, the denominator. _expanded - object, an expanded object of self, e.g., self = 'x*y/x'-> self._expanded = y (a symbol). _reduced - object, a reduced object of self, e.g., self = '(2*x + x*y)/z'-> self._reduced = x*(2 + y)/z (a fraction). NOTE: self._prec = 4.""" # Check for illegal division. if denominator.val == 0.0: error("Division by zero.") # Initialise all variables. 
self.val = numerator.val self.t = min([numerator.t, denominator.t]) self.num = numerator self.denom = denominator self._prec = 4 self._expanded = False self._reduced = False # Only try to eliminate scalar values. # TODO: If we divide by a float, we could add the inverse to the # numerator as a product, but I don't know if this is efficient # since it will involve creating a new object. if denominator._prec == 0 and numerator._prec == 0: # float self.num = create_float(numerator.val/denominator.val) # Remove denominator, such that it will be excluded when printing. self.denom = None # Handle zero. if self.val == 0.0: # Remove denominator, such that it will be excluded when printing self.denom = None # Compute the representation now, such that we can use it directly # in the __eq__ and __ne__ methods (improves performance a bit, but # only when objects are cached). if self.denom: self._repr = "Fraction(%s, %s)" %(self.num._repr, self.denom._repr) else: self._repr = "Fraction(%s, %s)" %(self.num._repr, create_float(1)._repr) # Use repr as hash value. self._hash = hash(self._repr) # Print functions. def __str__(self): "Simple string representation which will appear in the generated code." if not self.denom: return str(self.num) # Get string for numerator and denominator. num = str(self.num) denom = str(self.denom) # Group numerator if it is a fraction, otherwise it should be handled already. if self.num._prec == 4: # frac num = format["grouping"](num) # Group denominator if it is a fraction or product, or if the value is negative. # NOTE: This will be removed by the optimisations later before writing any code. if self.denom._prec in (2, 4) or self.denom.val < 0.0: # prod or frac denom = format["grouping"](denom) # return num + format["division"] + denom return format["div"](num, denom) # Binary operators. def __add__(self, other): "Addition by other objects." # Add two fractions if their denominators are equal by creating # (expanded) sum of their numerators. 
if other._prec == 4 and self.denom == other.denom: # frac return create_fraction(create_sum([self.num, other.num]).expand(), self.denom) return create_sum([self, other]) def __sub__(self, other): "Subtract other objects." # Return a new sum if other._prec == 4 and self.denom == other.denom: # frac num = create_sum([self.num, create_product([FloatValue(-1), other.num])]).expand() return create_fraction(num, self.denom) return create_sum([self, create_product([FloatValue(-1), other])]) def __mul__(self, other): "Multiplication by other objects." # NOTE: assuming that we get expanded variables. # If product will be zero. if self.val == 0.0 or other.val == 0.0: return create_float(0) # Create new expanded numerator and denominator and use '/' to reduce. if other._prec != 4: # frac return (self.num*other)/self.denom # If we have a fraction, create new numerator and denominator and use # '/' to reduce expression. return create_product([self.num, other.num]).expand()/create_product([self.denom, other.denom]).expand() def __truediv__(self, other): "Division by other objects." # If division is illegal (this should definitely not happen). if other.val == 0.0: error("Division by zero.") # If fraction will be zero. if self.val == 0.0: return self.vrs[0] # The only thing that we shouldn't need to handle is division by other # Fractions if other._prec == 4: error("Did not expected to divide by fraction.") # Handle division by FloatValue, Symbol, Product and Sum in the same # way i.e., multiply other by the donominator and use division # (__div__ or other) in order to (try to) reduce the expression. # TODO: Is it better to first try to divide the numerator by other, # if a Fraction is the return value, then multiply the denominator of # that value by denominator of self. Otherwise the reduction was # successful and we just use the denom of self as denominator. return self.num/(other*self.denom) __div__ = __truediv__ # Public functions. 
def expand(self): "Expand the fraction expression." # If fraction is already expanded, simply return the expansion. if self._expanded: return self._expanded # If we don't have a denominator just return expansion of numerator. if not self.denom: return self.num.expand() # Expand numerator and denominator. num = self.num.expand() denom = self.denom.expand() # TODO: Is it too expensive to call expand in the below? # If both the numerator and denominator are fractions, create new # numerator and denominator and use division to possibly reduce the # expression. if num._prec == 4 and denom._prec == 4: # frac new_num = create_product([num.num, denom.denom]).expand() new_denom = create_product([num.denom, denom.num]).expand() self._expanded = new_num/new_denom # If the numerator is a fraction, multiply denominators and use # division to reduce expression. elif num._prec == 4: # frac new_denom = create_product([num.denom, denom]).expand() self._expanded = num.num/new_denom # If the denominator is a fraction multiply by the inverse and # use division to reduce expression. elif denom._prec == 4: # frac new_num = create_product([num, denom.denom]).expand() self._expanded = new_num/denom.num # Use division to reduce the expression, no need to call expand(). else: self._expanded = num/denom return self._expanded def get_unique_vars(self, var_type): "Get unique variables (Symbols) as a set." # Simply get the unique variables from numerator and denominator. var = self.num.get_unique_vars(var_type) var.update(self.denom.get_unique_vars(var_type)) return var def get_var_occurrences(self): """Determine the number of minimum number of times all variables occurs in the expression simply by calling the function on the numerator.""" return self.num.get_var_occurrences() def ops(self): "Return number of operations needed to evaluate fraction." # If we have a denominator, add the operations and +1 for '/'. 
if self.denom: return self.num.ops() + self.denom.ops() + 1 # Else we just return the number of operations for the numerator. return self.num.ops() def reduce_ops(self): # Try to reduce operations by reducing the numerator and denominator. # FIXME: We assume expanded variables here, so any common variables in # the numerator and denominator are already removed i.e, there is no # risk of encountering (x + x*y) / x -> x*(1 + y)/x -> (1 + y). if self._reduced: return self._reduced num = self.num.reduce_ops() # Only return a new Fraction if we still have a denominator. if self.denom: self._reduced = create_fraction(num, self.denom.reduce_ops()) else: self._reduced = num return self._reduced def reduce_var(self, var): "Reduce the fraction by another variable through division of numerator." # We assume that this function is only called by reduce_ops, such that # we just need to consider the numerator. return create_fraction(self.num/var, self.denom) def reduce_vartype(self, var_type): """Reduce expression with given var_type. It returns a tuple (found, remain), where 'found' is an expression that only has variables of type == var_type. If no variables are found, found=(). The 'remain' part contains the leftover after division by 'found' such that: self = found*remain.""" # Reduce the numerator by the var type. # print "self.num._prec: ", self.num._prec # print "self.num: ", self.num if self.num._prec == 3: foo = self.num.reduce_vartype(var_type) if len(foo) == 1: num_found, num_remain = foo[0] # num_found, num_remain = self.num.reduce_vartype(var_type)[0] else: # meg: I have only a marginal idea of what I'm doing here! 
# print "here: " new_sum = [] for num_found, num_remain in foo: if num_found == (): new_sum.append(create_fraction(num_remain, self.denom)) else: new_sum.append(create_fraction(create_product([num_found, num_remain]), self.denom)) return create_sum(new_sum).expand().reduce_vartype(var_type) else: # num_found, num_remain = self.num.reduce_vartype(var_type) foo = self.num.reduce_vartype(var_type) if len(foo) != 1: raise RuntimeError("This case is not handled") num_found, num_remain = foo[0] # # TODO: Remove this test later, expansion should have taken care of # # no denominator. # if not self.denom: # error("This fraction should have been expanded.") # If the denominator is not a Sum things are straightforward. denom_found = None denom_remain = None # print "self.denom: ", self.denom # print "self.denom._prec: ", self.denom._prec if self.denom._prec != 3: # sum # denom_found, denom_remain = self.denom.reduce_vartype(var_type) foo = self.denom.reduce_vartype(var_type) if len(foo) != 1: raise RuntimeError("This case is not handled") denom_found, denom_remain = foo[0] # If we have a Sum in the denominator, all terms must be reduced by # the same terms to make sense else: remain = [] for m in self.denom.vrs: # d_found, d_remain = m.reduce_vartype(var_type) foo = m.reduce_vartype(var_type) d_found, d_remain = foo[0] # If we've found a denom, but the new found is different from # the one already found, terminate loop since it wouldn't make # sense to reduce the fraction. # TODO: handle I0/((I0 + I1)/(G0 + G1) + (I1 + I2)/(G1 + G2)) # better than just skipping. # if len(foo) != 1: # raise RuntimeError("This case is not handled") if len(foo) != 1 or (denom_found is not None and repr(d_found) != repr(denom_found)): # If the denominator of the entire sum has a type which is # lower than or equal to the vartype that we are currently # reducing for, we have to move it outside the expression # as well. 
# TODO: This is quite application specific, but I don't see # how we can do it differently at the moment. if self.denom.t <= var_type: if not num_found: num_found = create_float(1) return [(create_fraction(num_found, self.denom), num_remain)] else: # The remainder is always a fraction return [(num_found, create_fraction(num_remain, self.denom))] # Update denom found and add remainder. denom_found = d_found remain.append(d_remain) # There is always a non-const remainder if denominator was a sum. denom_remain = create_sum(remain) # print "den f: ", denom_found # print "den r: ", denom_remain # If we have found a common denominator, but no found numerator, # create a constant. # TODO: Add more checks to avoid expansion. found = None # There is always a remainder. remain = create_fraction(num_remain, denom_remain).expand() # print "remain: ", repr(remain) if num_found: if denom_found: found = create_fraction(num_found, denom_found) else: found = num_found else: if denom_found: found = create_fraction(create_float(1), denom_found) else: found = () # print "found: ", found # print len((found, remain)) return [(found, remain)] # FFC quadrature modules. from .floatvalue import FloatValue from .symbol import Symbol from .product import Product from .sumobj import Sum ffc-1.6.0/ffc/quadrature/optimisedquadraturetransformer.py000066400000000000000000000650501255571034100241300ustar00rootroot00000000000000"QuadratureTransformer (optimised) for quadrature code generation to translate UFL expressions." # Copyright (C) 2009-2011 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Anders Logg, 2009 # Python modules. from numpy import shape from six import iteritems, iterkeys from six.moves import xrange as range from six import advance_iterator as next def firstkey(d): return next(iterkeys(d)) # UFL common. from ufl.common import product from ufl.utils.sorting import sorted_by_key # UFL Classes. from ufl.classes import FixedIndex from ufl.classes import IntValue from ufl.classes import FloatValue from ufl.classes import Coefficient from ufl.classes import Operator # FFC modules. from ffc.log import info, debug, error, ffc_assert from ffc.cpp import format from ffc.quadrature.quadraturetransformerbase import QuadratureTransformerBase from ffc.quadrature.quadratureutils import create_permutations # Symbolics functions #from ffc.quadrature.symbolics import set_format from ffc.quadrature.symbolics import create_float, create_symbol, create_product,\ create_sum, create_fraction, BASIS, IP, GEO, CONST class QuadratureTransformerOpt(QuadratureTransformerBase): "Transform UFL representation to quadrature code." def __init__(self, *args): # Initialise base class. QuadratureTransformerBase.__init__(self, *args) # set_format(format) # ------------------------------------------------------------------------- # Start handling UFL classes. # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- # AlgebraOperators (algebra.py). 
# ------------------------------------------------------------------------- def sum(self, o, *operands): #print("Visiting Sum: " + repr(o) + "\noperands: " + "\n".join(map(repr, operands))) code = {} # Loop operands that has to be summend. for op in operands: # If entries does already exist we can add the code, otherwise just # dump them in the element tensor. for key, val in sorted(op.items()): if key in code: code[key].append(val) else: code[key] = [val] # Add sums and group if necessary. for key, val in sorted_by_key(code): if len(val) > 1: code[key] = create_sum(val) elif val: code[key] = val[0] else: error("Where did the values go?") # If value is zero just ignore it. if abs(code[key].val) < format["epsilon"]: del code[key] return code def product(self, o, *operands): #print("\n\nVisiting Product:\n" + str(tree_format(o))) permute = [] not_permute = [] # Sort operands in objects that needs permutation and objects that does not. for op in operands: # If we get an empty dict, something was zero and so is the product. if not op: return {} if len(op) > 1 or (op and firstkey(op) != ()): permute.append(op) elif op and firstkey(op) == (): not_permute.append(op[()]) # Create permutations. # TODO: After all indices have been expanded I don't think that we'll # ever get more than a list of entries and values. #print("\npermute: " + repr(permute)) #print("\nnot_permute: " + repr(not_permute)) permutations = create_permutations(permute) #print("\npermutations: " + repr(permutations)) # Create code. code ={} if permutations: for key, val in permutations.items(): # Sort key in order to create a unique key. l = sorted(key) # TODO: I think this check can be removed for speed since we # just have a list of objects we should never get any conflicts here. 
ffc_assert(tuple(l) not in code, "This key should not be in the code.") code[tuple(l)] = create_product(val + not_permute) else: return {():create_product(not_permute)} return code def division(self, o, *operands): #print("\n\nVisiting Division: " + repr(o) + "with operands: " + "\n".join(map(repr,operands))) ffc_assert(len(operands) == 2, "Expected exactly two operands (numerator and denominator): " + repr(operands)) # Get the code from the operands. numerator_code, denominator_code = operands # TODO: Are these safety checks needed? ffc_assert(() in denominator_code and len(denominator_code) == 1, \ "Only support function type denominator: " + repr(denominator_code)) code = {} # Get denominator and create new values for the numerator. denominator = denominator_code[()] for key, val in numerator_code.items(): code[key] = create_fraction(val, denominator) return code def power(self, o): #print("\n\nVisiting Power: " + repr(o)) # Get base and exponent. base, expo = o.ufl_operands # Visit base to get base code. base_code = self.visit(base) # TODO: Are these safety checks needed? ffc_assert(() in base_code and len(base_code) == 1, "Only support function type base: " + repr(base_code)) # Get the base code and create power. val = base_code[()] # Handle different exponents if isinstance(expo, IntValue): return {(): create_product([val]*expo.value())} elif isinstance(expo, FloatValue): exp = format["floating point"](expo.value()) sym = create_symbol(format["std power"](str(val), exp), val.t, val, 1) return {(): sym} elif isinstance(expo, (Coefficient, Operator)): exp = self.visit(expo)[()] # print "pow exp: ", exp # print "pow val: ", val sym = create_symbol(format["std power"](str(val), exp), val.t, val, 1) return {(): sym} else: error("power does not support this exponent: " + repr(expo)) def abs(self, o, *operands): #print("\n\nVisiting Abs: " + repr(o) + "with operands: " + "\n".join(map(repr,operands))) # TODO: Are these safety checks needed? 
ffc_assert(len(operands) == 1 and () in operands[0] and len(operands[0]) == 1, \ "Abs expects one operand of function type: " + repr(operands)) # Take absolute value of operand. val = operands[0][()] new_val = create_symbol(format["absolute value"](str(val)), val.t, val, 1) return {():new_val} def min_value(self, o, *operands): # Take minimum value of operands. val0 = operands[0][()] val1 = operands[1][()] t = min(val0.t, val1.t) # FIXME: I don't know how to implement this the optimized way. Is this right? new_val = create_symbol(format["min value"](str(val0), str(val1)), t) return {():new_val} def max_value(self, o, *operands): # Take maximum value of operands. val0 = operands[0][()] val1 = operands[1][()] t = min(val0.t, val1.t) # FIXME: I don't know how to implement this the optimized way. Is this right? new_val = create_symbol(format["max value"](str(val0), str(val1)), t) return {():new_val} # ------------------------------------------------------------------------- # Condition, Conditional (conditional.py). # ------------------------------------------------------------------------- def not_condition(self, o, *operands): # This is a Condition but not a BinaryCondition, and the operand will be another Condition # Get condition expression and do safety checks. # Might be a bit too strict? c, = operands ffc_assert(len(c) == 1 and firstkey(c) == (),\ "Condition for NotCondition should only be one function: " + repr(c)) sym = create_symbol(format["not"](str(c[()])), c[()].t, base_op=c[()].ops()+1) return {(): sym} def binary_condition(self, o, *operands): # Get LHS and RHS expressions and do safety checks. # Might be a bit too strict? lhs, rhs = operands ffc_assert(len(lhs) == 1 and firstkey(lhs) == (),\ "LHS of Condtion should only be one function: " + repr(lhs)) ffc_assert(len(rhs) == 1 and firstkey(rhs) == (),\ "RHS of Condtion should only be one function: " + repr(rhs)) # Map names from UFL to cpp.py. 
name_map = {"==":"is equal", "!=":"not equal",\ "<":"less than", ">":"greater than",\ "<=":"less equal", ">=":"greater equal",\ "&&": "and", "||": "or"} # Get the minimum type t = min(lhs[()].t, rhs[()].t) ops = lhs[()].ops() + rhs[()].ops() + 1 cond = str(lhs[()])+format[name_map[o._name]]+str(rhs[()]) sym = create_symbol(format["grouping"](cond), t, base_op=ops) return {(): sym} def conditional(self, o, *operands): # Get condition and return values; and do safety check. cond, true, false = operands ffc_assert(len(cond) == 1 and firstkey(cond) == (),\ "Condtion should only be one function: " + repr(cond)) ffc_assert(len(true) == 1 and firstkey(true) == (),\ "True value of Condtional should only be one function: " + repr(true)) ffc_assert(len(false) == 1 and firstkey(false) == (),\ "False value of Condtional should only be one function: " + repr(false)) # Get values and test for None t_val = true[()] f_val = false[()] # Get the minimum type and number of operations # TODO: conditionals are currently always located inside the ip loop, # therefore the type has to be at least IP (fix bug #1082048). This can # be optimised. t = min([cond[()].t, t_val.t, f_val.t, IP]) ops = sum([cond[()].ops(), t_val.ops(), f_val.ops()]) # Create expression for conditional # TODO: Handle this differently to expose the variables which are used # to create the expressions. expr = create_symbol(format["evaluate conditional"](cond[()], t_val, f_val), t) num = len(self.conditionals) name = create_symbol(format["conditional"](num), t) if not expr in self.conditionals: self.conditionals[expr] = (t, ops, num) else: num = self.conditionals[expr][2] name = create_symbol(format["conditional"](num), t) return {():name} # ------------------------------------------------------------------------- # FacetNormal, CellVolume, Circumradius, FacetArea (geometry.py). 
# ------------------------------------------------------------------------- def cell_coordinate(self, o): # FIXME error("This object should be implemented by the child class.") def facet_coordinate(self, o): # FIXME error("This object should be implemented by the child class.") def cell_origin(self, o): # FIXME error("This object should be implemented by the child class.") def facet_origin(self, o): # FIXME error("This object should be implemented by the child class.") def cell_facet_origin(self, o): # FIXME error("This object should be implemented by the child class.") def jacobian(self, o): # FIXME error("This object should be implemented by the child class.") def jacobian_determinant(self, o): # FIXME error("This object should be implemented by the child class.") def jacobian_inverse(self, o): # FIXME error("This object should be implemented by the child class.") def facet_jacobian(self, o): # FIXME error("This object should be implemented by the child class.") def facet_jacobian_determinant(self, o): # FIXME error("This object should be implemented by the child class.") def facet_jacobian_inverse(self, o): # FIXME error("This object should be implemented by the child class.") def cell_facet_jacobian(self, o): # FIXME error("This object should be implemented by the child class.") def cell_facet_jacobian_determinant(self, o): # FIXME error("This object should be implemented by the child class.") def cell_facet_jacobian_inverse(self, o): # FIXME error("This object should be implemented by the child class.") def facet_normal(self, o): components = self.component() # Safety check. ffc_assert(len(components) == 1, "FacetNormal expects 1 component index: " + repr(components)) # Handle 1D as a special case. # FIXME: KBO: This has to change for mD elements in R^n : m < n if self.gdim == 1: # FIXME: MSA UFL uses shape (1,) now, can we remove the special case here then? 
normal_component = format["normal component"](self.restriction, "") else: normal_component = format["normal component"](self.restriction, components[0]) self.trans_set.add(normal_component) return {(): create_symbol(normal_component, GEO)} def cell_normal(self, o): # FIXME error("This object should be implemented by the child class.") def cell_volume(self, o): # FIXME: KBO: This has to change for higher order elements #detJ = format["det(J)"](self.restriction) #volume = format["absolute value"](detJ) #self.trans_set.add(detJ) volume = format["cell volume"](self.restriction) self.trans_set.add(volume) return {():create_symbol(volume, GEO)} def circumradius(self, o): # FIXME: KBO: This has to change for higher order elements circumradius = format["circumradius"](self.restriction) self.trans_set.add(circumradius) return {():create_symbol(circumradius, GEO)} def facet_area(self, o): # FIXME: KBO: This has to change for higher order elements # NOTE: Omitting restriction because the area of a facet is the same # on both sides. # FIXME: Since we use the scale factor, facet area has no meaning # for cell integrals. (Need check in FFC or UFL). area = format["facet area"] self.trans_set.add(area) return {():create_symbol(area, GEO)} def min_facet_edge_length(self, o): # FIXME: this has no meaning for cell integrals. (Need check in FFC or UFL). tdim = self.tdim # FIXME: o.domain().topological_dimension() ??? if tdim < 3: return self.facet_area(o) edgelen = format["min facet edge length"](self.restriction) self.trans_set.add(edgelen) return {():create_symbol(edgelen, GEO)} def max_facet_edge_length(self, o): # FIXME: this has no meaning for cell integrals. (Need check in FFC or UFL). tdim = self.tdim # FIXME: o.domain().topological_dimension() ??? 
if tdim < 3: return self.facet_area(o) edgelen = format["max facet edge length"](self.restriction) self.trans_set.add(edgelen) return {():create_symbol(edgelen, GEO)} def cell_orientation(self, o): # FIXME error("This object should be implemented by the child class.") def quadrature_weight(self, o): # FIXME error("This object should be implemented by the child class.") # ------------------------------------------------------------------------- def create_argument(self, ufl_argument, derivatives, component, local_comp, local_offset, ffc_element, transformation, multiindices, tdim, gdim, avg): "Create code for basis functions, and update relevant tables of used basis." # Prefetch formats to speed up code generation. f_transform = format["transform"] f_detJ = format["det(J)"] # Reset code code = {} # Affine mapping if transformation == "affine": # Loop derivatives and get multi indices. for multi in multiindices: deriv = [multi.count(i) for i in range(tdim)] if not any(deriv): deriv = [] # Create mapping and basis name. mapping, basis = self._create_mapping_basis(component, deriv, avg, ufl_argument, ffc_element) if not mapping in code: code[mapping] = [] if basis is not None: # Add transformation if needed. code[mapping].append(self.__apply_transform(basis, derivatives, multi, tdim, gdim)) # Handle non-affine mappings. else: ffc_assert(avg is None, "Taking average is not supported for non-affine mappings.") # Loop derivatives and get multi indices. for multi in multiindices: deriv = [multi.count(i) for i in range(tdim)] if not any(deriv): deriv = [] for c in range(tdim): # Create mapping and basis name. mapping, basis = self._create_mapping_basis(c + local_offset, deriv, avg, ufl_argument, ffc_element) if not mapping in code: code[mapping] = [] if basis is not None: # Multiply basis by appropriate transform. 
if transformation == "covariant piola": dxdX = create_symbol(f_transform("JINV", c, local_comp, tdim, gdim, self.restriction), GEO) basis = create_product([dxdX, basis]) elif transformation == "contravariant piola": detJ = create_fraction(create_float(1), create_symbol(f_detJ(self.restriction), GEO)) dXdx = create_symbol(f_transform("J", local_comp, c, gdim, tdim, self.restriction), GEO) basis = create_product([detJ, dXdx, basis]) else: error("Transformation is not supported: " + repr(transformation)) # Add transformation if needed. code[mapping].append(self.__apply_transform(basis, derivatives, multi, tdim, gdim)) # Add sums and group if necessary. for key, val in list(code.items()): if len(val) > 1: code[key] = create_sum(val) else: code[key] = val[0] return code def create_function(self, ufl_function, derivatives, component, local_comp, local_offset, ffc_element, is_quad_element, transformation, multiindices, tdim, gdim, avg): "Create code for basis functions, and update relevant tables of used basis." ffc_assert(ufl_function in self._function_replace_values, "Expecting ufl_function to have been mapped prior to this call.") # Prefetch formats to speed up code generation. f_transform = format["transform"] f_detJ = format["det(J)"] # Reset code code = [] # Handle affine mappings. if transformation == "affine": # Loop derivatives and get multi indices. for multi in multiindices: deriv = [multi.count(i) for i in range(tdim)] if not any(deriv): deriv = [] # Create function name. function_name = self._create_function_name(component, deriv, avg, is_quad_element, ufl_function, ffc_element) if function_name: # Add transformation if needed. code.append(self.__apply_transform(function_name, derivatives, multi, tdim, gdim)) # Handle non-affine mappings. else: ffc_assert(avg is None, "Taking average is not supported for non-affine mappings.") # Loop derivatives and get multi indices. 
for multi in multiindices: deriv = [multi.count(i) for i in range(tdim)] if not any(deriv): deriv = [] for c in range(tdim): function_name = self._create_function_name(c + local_offset, deriv, avg, is_quad_element, ufl_function, ffc_element) if function_name: # Multiply basis by appropriate transform. if transformation == "covariant piola": dxdX = create_symbol(f_transform("JINV", c, local_comp, tdim, gdim, self.restriction), GEO) function_name = create_product([dxdX, function_name]) elif transformation == "contravariant piola": detJ = create_fraction(create_float(1), create_symbol(f_detJ(self.restriction), GEO)) dXdx = create_symbol(f_transform("J", local_comp, c, gdim, tdim, self.restriction), GEO) function_name = create_product([detJ, dXdx, function_name]) else: error("Transformation is not supported: ", repr(transformation)) # Add transformation if needed. code.append(self.__apply_transform(function_name, derivatives, multi, tdim, gdim)) if not code: return create_float(0.0) elif len(code) > 1: code = create_sum(code) else: code = code[0] return code # ------------------------------------------------------------------------- # Helper functions for Argument and Coefficient # ------------------------------------------------------------------------- def __apply_transform(self, function, derivatives, multi, tdim, gdim): "Apply transformation (from derivatives) to basis or function." f_transform = format["transform"] # Add transformation if needed. 
transforms = [] if not self.integral_type == "custom": for i, direction in enumerate(derivatives): ref = multi[i] t = f_transform("JINV", ref, direction, tdim, gdim, self.restriction) transforms.append(create_symbol(t, GEO)) transforms.append(function) return create_product(transforms) # ------------------------------------------------------------------------- # Helper functions for transformation of UFL objects in base class # ------------------------------------------------------------------------- def _create_symbol(self, symbol, domain): return {():create_symbol(symbol, domain)} def _create_product(self, symbols): return create_product(symbols) def _format_scalar_value(self, value): #print("format_scalar_value: %d" % value) if value is None: return {():create_float(0.0)} return {():create_float(value)} def _math_function(self, operands, format_function): #print("Calling _math_function() of optimisedquadraturetransformer.") # TODO: Are these safety checks needed? ffc_assert(len(operands) == 1 and () in operands[0] and len(operands[0]) == 1, \ "MathFunctions expect one operand of function type: " + repr(operands)) # Use format function on value of operand. operand = operands[0] for key, val in list(operand.items()): new_val = create_symbol(format_function(str(val)), val.t, val, 1) operand[key] = new_val #raise Exception("pause") return operand def _bessel_function(self, operands, format_function): # TODO: Are these safety checks needed? # TODO: work on reference instead of copies? 
(like math_function) ffc_assert(len(operands) == 2,\ "BesselFunctions expect two operands of function type: " + repr(operands)) nu, x = operands ffc_assert(len(nu) == 1 and () in nu,\ "Expecting one operand of function type as first argument to BesselFunction : " + repr(nu)) ffc_assert(len(x) == 1 and () in x,\ "Expecting one operand of function type as second argument to BesselFunction : " + repr(x)) nu = nu[()] x = x[()] if nu is None: nu = format["floating point"](0.0) if x is None: x = format["floating point"](0.0) sym = create_symbol(format_function(x,nu), x.t, x, 1) return {():sym} # ------------------------------------------------------------------------- # Helper functions for code_generation() # ------------------------------------------------------------------------- def _count_operations(self, expression): return expression.ops() def _create_entry_data(self, val, integral_type): # zero = False # Multiply value by weight and determinant ACCESS = GEO weight = format["weight"](self.points) if self.points > 1: weight += format["component"]("", format["integration points"]) ACCESS = IP weight = self._create_symbol(weight, ACCESS)[()] # Create value. if integral_type in ("vertex", "custom"): trans_set = set() value = create_product([val, weight]) else: f_scale_factor = format["scale factor"] trans_set = set([f_scale_factor]) value = create_product([val, weight, create_symbol(f_scale_factor, GEO)]) # Update sets of used variables (if they will not be used because of # optimisations later, they will be reset). 
trans_set.update([str(x) for x in value.get_unique_vars(GEO)]) used_points = set([self.points]) ops = self._count_operations(value) used_psi_tables = set([self.psi_tables_map[b] for b in value.get_unique_vars(BASIS)]) return (value, ops, [trans_set, used_points, used_psi_tables]) ffc-1.6.0/ffc/quadrature/parameters.py000066400000000000000000000052771255571034100177220ustar00rootroot00000000000000"Quadrature representation class for UFL" # Copyright (C) 2009-2014 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Anders Logg 2009, 2014 # Modified by Martin Alnaes 2013-2014 # FFC modules from ffc.log import warning def parse_optimise_parameters(parameters, itg_data): # Initialize parameters optimise_parameters = {"eliminate zeros": False, "optimisation": False, "ignore ones": False, "remove zero terms": False, "ignore zero tables": False} # Set optimized parameters if parameters["optimize"] and itg_data.integral_type == "custom": warning("Optimization not available for custom integrals, skipping optimization.") elif parameters["optimize"]: optimise_parameters["ignore ones"] = True optimise_parameters["remove zero terms"] = True optimise_parameters["ignore zero tables"] = True # Do not include this in below if/else clause since we want to be # able to switch on this optimisation in addition to the other # optimisations. 
if "eliminate_zeros" in parameters: optimise_parameters["eliminate zeros"] = True if "simplify_expressions" in parameters: optimise_parameters["optimisation"] = "simplify_expressions" elif "precompute_ip_const" in parameters: optimise_parameters["optimisation"] = "precompute_ip_const" elif "precompute_basis_const" in parameters: optimise_parameters["optimisation"] = "precompute_basis_const" # The current default optimisation (for -O) is equal to # '-feliminate_zeros -fsimplify_expressions'. else: # If '-O -feliminate_zeros' was given on the command line, do not # simplify expressions if not "eliminate_zeros" in parameters: optimise_parameters["eliminate zeros"] = True optimise_parameters["optimisation"] = "simplify_expressions" return optimise_parameters ffc-1.6.0/ffc/quadrature/product.py000066400000000000000000000405261255571034100172330ustar00rootroot00000000000000"This file implements a class to represent a product." # Copyright (C) 2009-2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2009-07-12 # Last changed: 2010-03-11 # FFC modules. from ffc.log import error from ffc.cpp import format # FFC quadrature modules. 
from .symbolics import create_float from .symbolics import create_product from .symbolics import create_sum from .symbolics import create_fraction from .expr import Expr from functools import reduce #class Product(object): class Product(Expr): __slots__ = ("vrs", "_expanded") def __init__(self, variables): """Initialise a Product object, it derives from Expr and contains the additional variables: vrs - a list of variables _expanded - object, an expanded object of self, e.g., self = x*(2+y) -> self._expanded = (2*x + x*y) (a sum), or self = 2*x -> self._expanded = 2*x (self). NOTE: self._prec = 2.""" # Initialise value, list of variables, class. self.val = 1.0 self.vrs = [] self._prec = 2 # Initially set _expanded to True. self._expanded = True # Process variables if we have any. if variables: # Remove nested Products and test for expansion. float_val = 1.0 for var in variables: # If any value is zero the entire product is zero. if var.val == 0.0: self.val = 0.0 self.vrs = [create_float(0.0)] float_val = 0.0 break # Collect floats into one variable if var._prec == 0: # float float_val *= var.val continue # Take care of product such that we don't create nested products. elif var._prec == 2: # prod # if var.vrs[0]._prec == 0: # float_val *= var.vrs[0].val # self.vrs += var.vrs[1:] # continue # self.vrs += var.vrs # continue # If expanded product is a float, just add it. if var._expanded and var._expanded._prec == 0: float_val *= var._expanded.val # If expanded product is symbol, this product is still expanded and add symbol. elif var._expanded and var._expanded._prec == 1: self.vrs.append(var._expanded) # If expanded product is still a product, add the variables. elif var._expanded and var._expanded._prec == 2: # self.vrs.append(var) # Add copies of the variables of other product (collect floats). 
if var._expanded.vrs[0]._prec == 0: float_val *= var._expanded.vrs[0].val self.vrs += var._expanded.vrs[1:] continue self.vrs += var._expanded.vrs # If expanded product is a sum or fraction, we must expand this product later. elif var._expanded and var._expanded._prec in (3, 4): self._expanded = False self.vrs.append(var._expanded) # Else the product is not expanded, and we must expand this one later else: self._expanded = False # Add copies of the variables of other product (collect floats). if var.vrs[0]._prec == 0: float_val *= var.vrs[0].val self.vrs += var.vrs[1:] continue self.vrs += var.vrs continue # If we have sums or fractions in the variables the product is not expanded. elif var._prec in (3, 4): # sum or frac self._expanded = False # Just add any variable at this point to list of new vars. self.vrs.append(var) # If value is 1 there is no need to include it, unless it is the # only parameter left i.e., 2*0.5 = 1. if float_val and float_val != 1.0: self.val = float_val self.vrs.append(create_float(float_val)) # If we no longer have any variables add the float. elif not self.vrs: self.val = float_val self.vrs = [create_float(float_val)] # If 1.0 is the only value left, add it. elif abs(float_val - 1.0) < format["epsilon"] and not self.vrs: self.val = 1.0 self.vrs = [create_float(1)] # If we don't have any variables the product is zero. else: self.val = 0.0 self.vrs = [create_float(0)] # The type is equal to the lowest variable type. self.t = min([v.t for v in self.vrs]) # Sort the variables such that comparisons work. self.vrs.sort() # Compute the representation now, such that we can use it directly # in the __eq__ and __ne__ methods (improves performance a bit, but # only when objects are cached). self._repr = "Product([%s])" % ", ".join([v._repr for v in self.vrs]) # Use repr as hash value. self._hash = hash(self._repr) # Store self as expanded value, if we did not encounter any sums or fractions. 
if self._expanded: self._expanded = self # Print functions. def __str__(self): "Simple string representation which will appear in the generated code." # If we have more than one variable and the first float is -1 exlude the 1. if len(self.vrs) > 1 and self.vrs[0]._prec == 0 and self.vrs[0].val == -1.0: # Join string representation of members by multiplication return format["sub"](["", format["mul"]([str(v) for v in self.vrs[1:]])]) return format["mul"]([str(v) for v in self.vrs]) # Binary operators. def __add__(self, other): "Addition by other objects." # NOTE: Assuming expanded variables. # If two products are equal, add their float values. if other._prec == 2 and self.get_vrs() == other.get_vrs(): # Return expanded product, to get rid of 3*x + -2*x -> x, not 1*x. return create_product([create_float(self.val + other.val)] + list(self.get_vrs())).expand() # if self == 2*x and other == x return 3*x. elif other._prec == 1: # sym if self.get_vrs() == (other,): # Return expanded product, to get rid of -x + x -> 0, not product(0). return create_product([create_float(self.val + 1.0), other]).expand() # Return sum return create_sum([self, other]) def __sub__(self, other): "Subtract other objects." if other._prec == 2 and self.get_vrs() == other.get_vrs(): # Return expanded product, to get rid of 3*x + -2*x -> x, not 1*x. return create_product([create_float(self.val - other.val)] + list(self.get_vrs())).expand() # if self == 2*x and other == x return 3*x. elif other._prec == 1: # sym if self.get_vrs() == (other,): # Return expanded product, to get rid of -x + x -> 0, not product(0). return create_product([create_float(self.val - 1.0), other]).expand() # Return sum return create_sum([self, create_product([FloatValue(-1), other])]) def __mul__(self, other): "Multiplication by other objects." # If product will be zero. if self.val == 0.0 or other.val == 0.0: return create_float(0) # If other is a Sum or Fraction let them handle it. 
if other._prec in (3, 4): # sum or frac return other.__mul__(self) # NOTE: We expect expanded sub-expressions with no nested operators. # Create new product adding float or symbol. if other._prec in (0, 1): # float or sym return create_product(self.vrs + [other]) # Create new product adding all variables from other Product. return create_product(self.vrs + other.vrs) def __truediv__(self, other): "Division by other objects." # If division is illegal (this should definitely not happen). if other.val == 0.0: error("Division by zero.") # If fraction will be zero. if self.val == 0.0: return self.vrs[0] # If other is a Sum we can only return a fraction. # NOTE: Expect that other is expanded i.e., x + x -> 2*x which can be handled # TODO: Fix x / (x + x*y) -> 1 / (1 + y). # Or should this be handled when reducing a fraction? if other._prec == 3: # sum return create_fraction(self, other) # Handle division by FloatValue, Symbol, Product and Fraction. # NOTE: assuming that we get expanded variables. # Copy numerator, and create list for denominator. num = self.vrs[:] denom = [] # Add floatvalue, symbol and products to the list of denominators. if other._prec in (0, 1): # float or sym denom = [other] elif other._prec == 2: # prod # Get copy. denom = other.vrs[:] # fraction. else: error("Did not expected to divide by fraction.") # Loop entries in denominator and remove from numerator (and denominator). for d in denom[:]: # Add the inverse of a float to the numerator and continue. if d._prec == 0: # float num.append(create_float(1.0/d.val)) denom.remove(d) continue if d in num: num.remove(d) denom.remove(d) # Create appropriate return value depending on remaining data. if len(num) > 1: # TODO: Make this more efficient? # Create product and expand to reduce # Product([5, 0.2]) == Product([1]) -> Float(1). num = create_product(num).expand() elif num: num = num[0] # If all variables in the numerator has been eliminated we need to add '1'. 
else: num = create_float(1) if len(denom) > 1: return create_fraction(num, create_product(denom)) elif denom: return create_fraction(num, denom[0]) # If we no longer have a denominater, just return the numerator. return num __div__ = __truediv__ # Public functions. def expand(self): "Expand all members of the product." # If we just have one variable, compute the expansion of it # (it is not a Product, so it should be safe). We need this to get # rid of Product([Symbol]) type expressions. if len(self.vrs) == 1: self._expanded = self.vrs[0].expand() return self._expanded # If product is already expanded, simply return the expansion. if self._expanded: return self._expanded # Sort variables such that we don't call the '*' operator more than we have to. float_syms = [] sum_fracs = [] for v in self.vrs: if v._prec in (0, 1): # float or sym float_syms.append(v) continue exp = v.expand() # If the expanded expression is a float, sym or product, # we can add the variables. if exp._prec in (0, 1): # float or sym float_syms.append(exp) elif exp._prec == 2: # prod float_syms += exp.vrs else: sum_fracs.append(exp) # If we have floats or symbols add the symbols to the rest as a single # product (for speed). if len(float_syms) > 1: sum_fracs.append( create_product(float_syms) ) elif float_syms: sum_fracs.append(float_syms[0]) # Use __mult__ to reduce list to one single variable. # TODO: Can this be done more efficiently without creating all the # intermediate variables? self._expanded = reduce(lambda x,y: x*y, sum_fracs) return self._expanded def get_unique_vars(self, var_type): "Get unique variables (Symbols) as a set." # Loop all members and update the set. var = set() for v in self.vrs: var.update(v.get_unique_vars(var_type)) return var def get_var_occurrences(self): """Determine the number of times all variables occurs in the expression. 
Returns a dictionary of variables and the number of times they occur.""" # TODO: The product should be expanded at this stage, should we check # this? # Create dictionary and count number of occurrences of each variable. d = {} for v in self.vrs: if v in d: d[v] += 1 continue d[v] = 1 return d def get_vrs(self): "Return all 'real' variables." # A product should only have one float value after initialisation. # TODO: Use this knowledge directly in other classes? if self.vrs[0]._prec == 0: # float return tuple(self.vrs[1:]) return tuple(self.vrs) def ops(self): "Get the number of operations to compute product." # It takes n-1 operations ('*') for a product of n members. op = len(self.vrs) - 1 # Loop members and add their count. for v in self.vrs: op += v.ops() # Subtract 1, if the first member is -1 i.e., -1*x*y -> x*y is only 1 op. if self.vrs[0]._prec == 0 and self.vrs[0].val == -1.0: op -= 1 return op def reduce_ops(self): "Reduce the number of operations to evaluate the product." # It's not possible to reduce a product if it is already expanded and # it should be at this stage. # TODO: Is it safe to return self.expand().reduce_ops() if product is # not expanded? And do we want to? # if self._expanded: # return self._expanded # error("Product must be expanded first before we can reduce the number of operations.") # TODO: This should crash if it goes wrong (the above is more correct but slower). return self._expanded def reduce_vartype(self, var_type): """Reduce expression with given var_type. It returns a tuple (found, remain), where 'found' is an expression that only has variables of type == var_type. If no variables are found, found=(). The 'remain' part contains the leftover after division by 'found' such that: self = found*remain.""" # Sort variables according to type. found = [] remains = [] for v in self.vrs: if v.t == var_type: found.append(v) continue remains.append(v) # Create appropriate object for found. 
if len(found) > 1: found = create_product(found) elif found: found = found.pop() # We did not find any variables. else: return [((), self)] # Create appropriate object for remains. if len(remains) > 1: remains = create_product(remains) elif remains: remains = remains.pop() # We don't have anything left. else: return [(self, create_float(1))] # Return whatever we found. return [(found, remains)] # FFC quadrature modules. from .floatvalue import FloatValue from .symbol import Symbol from .sumobj import Sum from .fraction import Fraction ffc-1.6.0/ffc/quadrature/quadraturegenerator.py000066400000000000000000001265211255571034100216370ustar00rootroot00000000000000"Code generator for quadrature representation." # Copyright (C) 2009-2014 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
# # Modified by Mehdi Nikbakht 2010 # Modified by Anders Logg 2013-2014 # Modified by Martin Alnaes 2013-2014 # Python modules import functools, itertools import numpy # UFL modules from ufl.utils.sorting import sorted_by_key from ufl.utils.derivativetuples import compute_derivative_tuples # FFC modules from ffc.log import info, debug, ffc_assert, error, warning from ffc.cpp import format, remove_unused from ffc.representationutils import initialize_integral_code # Utility and optimization functions for quadraturegenerator from ffc.quadrature.symbolics import generate_aux_constants def generate_integral_code(ir, prefix, parameters): "Generate code for integral from intermediate representation." # Prefetch formatting to speedup code generation (well...) ret = format["return"] # Generate code code = initialize_integral_code(ir, prefix, parameters) code["num_cells"] = ret(ir["num_cells"]) code["tabulate_tensor"] = _tabulate_tensor(ir, prefix, parameters) code["additional_includes_set"] = ir["additional_includes_set"] return code def _tabulate_tensor(ir, prefix, parameters): "Generate code for a single integral (tabulate_tensor())." 
# Prefetch formatting to speedup code generation f_comment = format["comment"] f_G = format["geometry constant"] f_const_double = format["assign"] f_switch = format["switch"] f_float = format["float"] f_assign = format["assign"] f_A = format["element tensor"] f_r = format["free indices"][0] f_loop = format["generate loop"] f_int = format["int"] f_facet = format["facet"] # Get data opt_par = ir["optimise_parameters"] integral_type = ir["integral_type"] gdim = ir["geometric_dimension"] tdim = ir["topological_dimension"] num_facets = ir["num_facets"] num_vertices = ir["num_vertices"] prim_idims = ir["prim_idims"] integrals = ir["trans_integrals"] geo_consts = ir["geo_consts"] oriented = ir["needs_oriented"] element_data = ir["element_data"] num_cells = ir["num_cells"] # Create sets of used variables used_weights = set() used_psi_tables = set() used_nzcs = set() trans_set = set() sets = [used_weights, used_psi_tables, used_nzcs, trans_set] affine_tables = {} # TODO: This is not populated anywhere, remove? quadrature_weights = ir["quadrature_weights"] operations = [] if integral_type == "cell": # Generate code for computing element tensor tensor_code, mem_code, num_ops = _generate_element_tensor(integrals, sets, opt_par, gdim) tensor_code = "\n".join(tensor_code) # Set operations equal to num_ops (for printing info on operations). 
operations.append([num_ops]) # Generate code for basic geometric quantities jacobi_code = "" jacobi_code += format["compute_jacobian"](tdim, gdim) jacobi_code += "\n" jacobi_code += format["compute_jacobian_inverse"](tdim, gdim) if oriented: jacobi_code += format["orientation"](tdim, gdim) jacobi_code += "\n" jacobi_code += format["scale factor snippet"] # Generate code for cell volume and circumradius jacobi_code += "\n\n" + format["generate cell volume"](tdim, gdim, integral_type) jacobi_code += "\n\n" + format["generate circumradius"](tdim, gdim, integral_type) elif integral_type == "exterior_facet": # Iterate over facets cases = [None for i in range(num_facets)] for i in range(num_facets): # Update transformer with facets and generate case code + set of used geometry terms. c, mem_code, ops = _generate_element_tensor(integrals[i], sets, opt_par, gdim) case = [f_comment("Total number of operations to compute element tensor (from this point): %d" % ops)] case += c cases[i] = "\n".join(case) # Save number of operations (for printing info on operations). operations.append([i, ops]) # Generate tensor code for all cases using a switch. 
tensor_code = f_switch(f_facet(None), cases) # Generate code for basic geometric quantities jacobi_code = "" jacobi_code += format["compute_jacobian"](tdim, gdim) jacobi_code += "\n" jacobi_code += format["compute_jacobian_inverse"](tdim, gdim) if oriented: jacobi_code += format["orientation"](tdim, gdim) jacobi_code += "\n" jacobi_code += "\n\n" + format["facet determinant"](tdim, gdim) jacobi_code += "\n\n" + format["generate normal"](tdim, gdim, integral_type) jacobi_code += "\n\n" + format["generate facet area"](tdim, gdim) if tdim == 3: jacobi_code += "\n\n" + format["generate min facet edge length"](tdim, gdim) jacobi_code += "\n\n" + format["generate max facet edge length"](tdim, gdim) # Generate code for cell volume and circumradius jacobi_code += "\n\n" + format["generate cell volume"](tdim, gdim, integral_type) jacobi_code += "\n\n" + format["generate circumradius"](tdim, gdim, integral_type) elif integral_type == "interior_facet": # Modify the dimensions of the primary indices because we have a macro element prim_idims = [d*2 for d in prim_idims] # Iterate over combinations of facets cases = [[None for j in range(num_facets)] for i in range(num_facets)] for i in range(num_facets): for j in range(num_facets): # Update transformer with facets and generate case code + set of used geometry terms. c, mem_code, ops = _generate_element_tensor(integrals[i][j], sets, opt_par, gdim) case = [f_comment("Total number of operations to compute element tensor (from this point): %d" % ops)] case += c cases[i][j] = "\n".join(case) # Save number of operations (for printing info on operations). operations.append([i, j, ops]) # Generate tensor code for all cases using a switch. 
tensor_code = f_switch(f_facet("+"), [f_switch(f_facet("-"), cases[i]) for i in range(len(cases))]) # Generate code for basic geometric quantities jacobi_code = "" for _r in ["+", "-"]: jacobi_code += format["compute_jacobian"](tdim, gdim, r=_r) jacobi_code += "\n" jacobi_code += format["compute_jacobian_inverse"](tdim, gdim, r=_r) if oriented: jacobi_code += format["orientation"](tdim, gdim, r=_r) jacobi_code += "\n" jacobi_code += "\n\n" + format["facet determinant"](tdim, gdim, r="+") jacobi_code += "\n\n" + format["generate normal"](tdim, gdim, integral_type) jacobi_code += "\n\n" + format["generate facet area"](tdim, gdim) if tdim == 3: jacobi_code += "\n\n" + format["generate min facet edge length"](tdim, gdim, r="+") jacobi_code += "\n\n" + format["generate max facet edge length"](tdim, gdim, r="+") # Generate code for cell volume and circumradius jacobi_code += "\n\n" + format["generate cell volume"](tdim, gdim, integral_type) jacobi_code += "\n\n" + format["generate circumradius"](tdim, gdim, integral_type) elif integral_type == "vertex": # Iterate over vertices cases = [None for i in range(num_vertices)] for i in range(num_vertices): # Update transformer with vertices and generate case code + # set of used geometry terms. c, mem_code, ops = _generate_element_tensor(integrals[i], sets, opt_par, gdim) case = [f_comment("Total number of operations to compute element tensor (from this point): %d" % ops)] case += c cases[i] = "\n".join(case) # Save number of operations (for printing info on operations). operations.append([i, ops]) # Generate tensor code for all cases using a switch. 
tensor_code = f_switch(format["vertex"], cases) # Generate code for basic geometric quantities jacobi_code = "" jacobi_code += format["compute_jacobian"](tdim, gdim) jacobi_code += "\n" jacobi_code += format["compute_jacobian_inverse"](tdim, gdim) if oriented: jacobi_code += format["orientation"](tdim, gdim) jacobi_code += "\n" jacobi_code += "\n\n" + format["facet determinant"](tdim, gdim) # FIXME: This is not defined in a point??? elif integral_type == "custom": # Warning that more than two cells in only partly supported. # The missing piece is to couple multiple cells to # restrictions other than '+' and '-'. if num_cells > 2: warning("Custom integrals with more than two cells only partly supported.") # Modify the dimensions of the primary indices because we have a macro element if num_cells == 2: prim_idims = [d*2 for d in prim_idims] # Check whether we need to generate facet normals generate_custom_facet_normal = num_cells == 2 # Generate code for computing element tensor tensor_code, mem_code, num_ops = _generate_element_tensor(integrals, sets, opt_par, gdim, generate_custom_facet_normal) tensor_code = "\n".join(tensor_code) # Set operations equal to num_ops (for printing info on operations). operations.append([num_ops]) # FIXME: Jacobi code is only needed when we use cell volume or circumradius. # FIXME: Does not seem to be removed by removed_unused. 
# Generate code for basic geometric quantities jacobi_code = "" for i in range(num_cells): r = i if num_cells > 1 else None jacobi_code += "\n" jacobi_code += f_comment("--- Compute geometric quantities on cell %d ---" % i) jacobi_code += "\n\n" if num_cells > 1: jacobi_code += f_comment("Extract vertex coordinates\n") jacobi_code += format["extract_cell_coordinates"]((tdim + 1)*gdim*i, r=i) jacobi_code += "\n\n" jacobi_code += format["compute_jacobian"](tdim, gdim, r=r) jacobi_code += "\n" jacobi_code += format["compute_jacobian_inverse"](tdim, gdim, r=r) jacobi_code += "\n" jacobi_code += format["generate cell volume"](tdim, gdim, integral_type, r=r if num_cells > 1 else None) jacobi_code += "\n" jacobi_code += format["generate circumradius"](tdim, gdim, integral_type, r=r if num_cells > 1 else None) jacobi_code += "\n" else: error("Unhandled integral type: " + str(integral_type)) # After we have generated the element code for all facets we can remove # the unused transformations. common = [remove_unused(jacobi_code, trans_set)] # FIXME: After introduction of custom integrals, the common code # here is not really common anymore. Think about how to # restructure this function. # Add common code except for custom integrals if integral_type != "custom": common += _tabulate_weights([quadrature_weights[p] for p in sorted(used_weights)]) # Add common code for updating tables name_map = ir["name_map"] tables = ir["unique_tables"] tables.update(affine_tables) # TODO: This is not populated anywhere, remove? common += _tabulate_psis(tables, used_psi_tables, name_map, used_nzcs, opt_par, integral_type, gdim) # Add special tabulation code for custom integral else: common += _evaluate_basis_at_quadrature_points(used_psi_tables, gdim, element_data, prefix, num_vertices, num_cells) # Reset the element tensor (array 'A' given as argument to tabulate_tensor() by assembler) # Handle functionals. 
common += [f_comment("Reset values in the element tensor.")] value = f_float(0) if prim_idims == []: common += [f_assign(f_A(f_int(0)), f_float(0))] else: dim = functools.reduce(lambda v, u: v*u, prim_idims) common += f_loop([f_assign(f_A(f_r), f_float(0))], [(f_r, 0, dim)]) # Create the constant geometry declarations (only generated if simplify expressions are enabled). geo_ops, geo_code = generate_aux_constants(geo_consts, f_G, f_const_double) if geo_code: common += [f_comment("Number of operations to compute geometry constants: %d." % geo_ops)] common += [format["declaration"](format["float declaration"], f_G(len(geo_consts)))] common += geo_code # Add comments. common += ["", f_comment("Compute element tensor using UFL quadrature representation")] common += [f_comment("Optimisations: %s" % ", ".join([str((k, opt_par[k]))\ for k in sorted(opt_par.keys())]))] # Print info on operation count. message = {"cell": "Cell, number of operations to compute tensor: %s", "exterior_facet": "Exterior facet %d, number of operations to compute tensor: %s", "interior_facet": "Interior facets (%d, %d), number of operations to compute tensor: %s", "vertex": "Vertex %s, number of operations to compute tensor: %s", "custom": "Custom domain, number of operations to compute tensor: %s"} for ops in operations: # Add geo ops count to integral ops count for writing info. if isinstance(ops[-1], int): ops[-1] += geo_ops return "\n".join(common) + "\n" + tensor_code def _generate_element_tensor(integrals, sets, optimise_parameters, gdim, generate_custom_facet_normal=False): "Construct quadrature code for element tensors." # Prefetch formats to speed up code generation. 
f_comment = format["comment"] f_ip = format["integration points"] f_I = format["ip constant"] f_loop = format["generate loop"] f_ip_coords = format["generate ip coordinates"] f_coords = format["vertex_coordinates"] f_double = format["float declaration"] f_decl = format["declaration"] f_X = format["ip coordinates"] f_C = format["conditional"] # Initialise return values. element_code = [] tensor_ops_count = 0 # TODO: KBO: The members_code was used when I generated the load_table.h # file which could load tables of basisfunction. This feature has not # been reimplemented. However, with the new design where we only # tabulate unique tables (and only non-zero entries) it doesn't seem to # be necessary. Should it be deleted? members_code = "" # We receive a dictionary {num_points: form,}. # Loop points and forms. for points, terms, functions, ip_consts, coordinate, conditionals in integrals: element_code += ["", f_comment("Loop quadrature points for integral.")] ip_code = [] num_ops = 0 # Generate code to compute coordinates if used. if coordinate: name, gdim, ip, r = coordinate element_code += ["", f_comment("Declare array to hold physical coordinate of quadrature point.")] element_code += [f_decl(f_double, f_X(points, gdim))] ops, coord_code = f_ip_coords(gdim, points, name, ip, r) ip_code += ["", f_comment("Compute physical coordinate of quadrature point, operations: %d." % ops)] ip_code += [coord_code] num_ops += ops # Update used psi tables and transformation set. sets[1].add(name) sets[3].add(f_coords(r)) # Generate code to compute function values. if functions: func_code, ops = _generate_functions(functions, sets) ip_code += func_code num_ops += ops # Generate code to compute conditionals (might depend on coordinates # and function values so put here). # TODO: Some conditionals might only depend on geometry so they # should be moved outside if possible. 
if conditionals: ip_code += [f_decl(f_double, f_C(len(conditionals)))] # Sort conditionals (need to in case of nested conditionals). reversed_conds = dict([(n, (o, e)) for e, (t, o, n) in conditionals.items()]) for num in range(len(conditionals)): name = format["conditional"](num) ops, expr = reversed_conds[num] ip_code += [f_comment("Compute conditional, operations: %d." % ops)] ip_code += [format["assign"](name, expr)] num_ops += ops # Generate code for ip constant declarations. # ip_const_ops, ip_const_code = generate_aux_constants(ip_consts, f_I,\ # format["const float declaration"], True) ip_const_ops, ip_const_code = generate_aux_constants(ip_consts, f_I,\ format["assign"], True) num_ops += ip_const_ops if ip_const_code: ip_code += ["", f_comment("Number of operations to compute ip constants: %d" %ip_const_ops)] ip_code += [format["declaration"](format["float declaration"], f_I(len(ip_consts)))] ip_code += ip_const_code # Generate code to evaluate the element tensor. integral_code, ops = _generate_integral_code(points, terms, sets, optimise_parameters) num_ops += ops if points is None: quadrature_ops = "unknown" tensor_ops_count = "unknown" else: quadrature_ops = num_ops*points tensor_ops_count += quadrature_ops ip_code += integral_code element_code.append(f_comment\ ("Number of operations to compute element tensor for following IP loop = %s" % str(quadrature_ops))) # Generate code for custom facet normal if necessary if generate_custom_facet_normal: for line in ip_code: if "n_00" in line: ip_code = [format["facet_normal_custom"](gdim)] + ip_code break # Loop code over all IPs. 
if points == 0: element_code.append(f_comment("Only 1 integration point, omitting IP loop.")) element_code += ip_code elif points is None: num_points = "num_quadrature_points" element_code += f_loop(ip_code, [(f_ip, 0, num_points)]) else: element_code += f_loop(ip_code, [(f_ip, 0, points)]) return (element_code, members_code, tensor_ops_count) def _generate_functions(functions, sets): "Generate declarations for functions and code to compute values." f_comment = format["comment"] f_double = format["float declaration"] f_F = format["function value"] f_float = format["floating point"] f_decl = format["declaration"] f_r = format["free indices"][0] f_iadd = format["iadd"] f_loop = format["generate loop"] # Create the function declarations. code = ["", f_comment("Coefficient declarations.")] code += [f_decl(f_double, f_F(n), f_float(0)) for n in range(len(functions))] # Get sets. used_psi_tables = sets[1] used_nzcs = sets[2] # Sort functions after loop ranges. function_list = {} for key, val in functions.items(): if val[1] in function_list: function_list[val[1]].append(key) else: function_list[val[1]] = [key] total_ops = 0 # Loop ranges and get list of functions. for loop_range, list_of_functions in sorted(function_list.items()): function_expr = {} function_numbers = [] # Loop functions. func_ops = 0 for function in list_of_functions: # Get name and number. number, range_i, ops, psi_name, u_nzcs, ufl_element = functions[function] # Add name to used psi names and non zeros name to used_nzcs. used_psi_tables.add(psi_name) used_nzcs.update(u_nzcs) # TODO: This check can be removed for speed later. ffc_assert(number not in function_expr, "This is definitely not supposed to happen!") # Convert function (that might be a symbol) to a simple string and save. function = str(function) function_expr[number] = function # Get number of operations to compute entry and add to function operations count. 
func_ops += (ops + 1)*range_i # Add function operations to total count total_ops += func_ops code += ["", f_comment("Total number of operations to compute function values = %d" % func_ops)] # Sort the functions according to name and create loop to compute the function values. lines = [f_iadd(f_F(n), function_expr[n]) for n in sorted(function_expr.keys())] code += f_loop(lines, [(f_r, 0, loop_range)]) # TODO: If loop_range == 1, this loop may be unneccessary. Not sure if it's safe to just skip it. return code, total_ops def _generate_integral_code(points, terms, sets, optimise_parameters): "Generate code to evaluate the element tensor." # Prefetch formats to speed up code generation. f_comment = format["comment"] f_mul = format["mul"] f_scale_factor = format["scale factor"] f_iadd = format["iadd"] f_add = format["add"] f_A = format["element tensor"] f_loop = format["generate loop"] f_B = format["basis constant"] # Initialise return values. code = [] num_ops = 0 loops = {} # Extract sets. used_weights, used_psi_tables, used_nzcs, trans_set = sets # Loop terms and create code. for loop, (data, entry_vals) in sorted(terms.items()): # If we don't have any entry values, there's no need to generate the # loop. if not entry_vals: continue # Get data. t_set, u_weights, u_psi_tables, u_nzcs, basis_consts = data # If we have a value, then we also need to update the sets of used variables. trans_set.update(t_set) used_weights.update(u_weights) used_psi_tables.update(u_psi_tables) used_nzcs.update(u_nzcs) # Generate code for basis constant declarations. 
# basis_const_ops, basis_const_code = generate_aux_constants(basis_consts, f_B,\ # format["const float declaration"], True) basis_const_ops, basis_const_code = generate_aux_constants(basis_consts, f_B,\ format["assign"], True) decl_code = [] if basis_consts: decl_code = [format["declaration"](format["float declaration"], f_B(len(basis_consts)))] loops[loop] = [basis_const_ops, decl_code + basis_const_code] for entry, value, ops in entry_vals: # Compute number of operations to compute entry # (add 1 because of += in assignment). entry_ops = ops + 1 # Create comment for number of operations entry_ops_comment = f_comment("Number of operations to compute entry: %d" % entry_ops) entry_code = f_iadd(f_A(entry), value) loops[loop][0] += entry_ops loops[loop][1] += [entry_ops_comment, entry_code] # Write all the loops of basis functions. for loop, ops_lines in sorted(loops.items()): ops, lines = ops_lines prim_ops = functools.reduce(lambda i, j: i*j, [ops] + [l[2] for l in loop]) # Add number of operations for current loop to total count. num_ops += prim_ops code += ["", f_comment("Number of operations for primary indices: %d" % prim_ops)] code += f_loop(lines, loop) return code, num_ops def _tabulate_weights(quadrature_weights): "Generate table of quadrature weights." # Prefetch formats to speed up code generation. f_float = format["floating point"] f_table = format["static const float declaration"] f_sep = format["list separator"] f_weight = format["weight"] f_component = format["component"] f_group = format["grouping"] f_decl = format["declaration"] f_tensor = format["tabulate tensor"] f_comment = format["comment"] f_int = format["int"] code = ["", f_comment("Array of quadrature weights.")] # Loop tables of weights and create code. for weights, points in quadrature_weights: # FIXME: For now, raise error if we don't have weights. # We might want to change this later. ffc_assert(weights.any(), "No weights.") # Create name and value for weight. 
num_points = len(points) name = f_weight(num_points) value = f_float(weights[0]) if len(weights) > 1: name += f_component("", f_int(num_points)) value = f_tensor(weights) code += [f_decl(f_table, name, value)] # Tabulate the quadrature points (uncomment for different parameters). # 1) Tabulate the points as: p0, p1, p2, with p0 = (x0, y0, z0) etc. # Use f_float to format the value (enable variable precision). formatted_points = [f_group(f_sep.join([f_float(val) for val in point])) for point in points] # Create comment. comment = "Quadrature points on the UFC reference element: " \ + f_sep.join(formatted_points) code += [f_comment(comment)] # 2) Tabulate the coordinates of the points p0, p1, p2 etc. # X: x0, x1, x2 # Y: y0, y1, y2 # Z: z0, z1, z2 # comment = "Quadrature coordinates on the UFC reference element: " # code += [format["comment"](comment)] # # All points have the same number of coordinates. # num_coord = len(points[0]) # # All points have x-coordinates. # xs = [f_float(p[0]) for p in points] # comment = "X: " + f_sep.join(xs) # code += [format["comment"](comment)] # ys = [] # zs = [] # # Tabulate y-coordinate if we have 2 or more coordinates. # if num_coord >= 2: # ys = [f_float(p[1]) for p in points] # comment = "Y: " + f_sep.join(ys) # code += [format["comment"](comment)] # # Only tabulate z-coordinate if we have 3 coordinates. # if num_coord == 3: # zs = [f_float(p[2]) for p in points] # comment = "Z: " + f_sep.join(zs) # code += [format["comment"](comment)] code += [""] return code def _tabulate_psis(tables, used_psi_tables, inv_name_map, used_nzcs, optimise_parameters, integral_type, gdim): "Tabulate values of basis functions and their derivatives at quadrature points." # Prefetch formats to speed up code generation. 
f_comment = format["comment"] f_table = format["static const float declaration"] f_vector_table = format["vector table declaration"] f_double_array = format["const double array declaration"] f_component = format["component"] f_const_uint = format["static const uint declaration"] f_nzcolumns = format["nonzero columns"] f_list = format["list"] f_decl = format["declaration"] f_tensor = format["tabulate tensor"] f_new_line = format["new line"] f_int = format["int"] f_eval_basis = format["call basis_all"] f_eval_derivs = format["call basis_derivatives_all"] f_loop = format["generate loop"] f_quad_point = format["quadrature point"] f_eval_basis = format["evaluate basis snippet"] # FIXME: Check if we can simplify the tabulation code = [] code += [f_comment("Values of basis functions at quadrature points.")] # Get list of non zero columns, if we ignore ones, ignore columns with one component. if optimise_parameters["ignore ones"]: nzcs = [] for key, val in sorted(inv_name_map.items()): # Check if we have a table of ones or if number of non-zero columns # is larger than one. if val[1] and len(val[1][1]) > 1 or not val[3]: nzcs.append(val[1]) else: nzcs = [val[1] for key, val in sorted(inv_name_map.items())\ if val[1]] # TODO: Do we get arrays that are not unique? new_nzcs = [] for nz in nzcs: # Only get unique arrays. if not nz in new_nzcs: new_nzcs.append(nz) # Construct name map. name_map = {} if inv_name_map: for name in sorted(inv_name_map): if inv_name_map[name][0] in name_map: name_map[inv_name_map[name][0]].append(name) else: name_map[inv_name_map[name][0]] = [name] # Loop items in table and tabulate. for name in sorted(used_psi_tables): # Only proceed if values are still used (if they're not remapped). vals = tables[name] if not vals is None: # Add declaration to name. ip, dofs = numpy.shape(vals) decl_name = f_component(name, [ip, dofs]) # Generate array of values. 
value = f_tensor(vals) code += [f_decl(f_table, decl_name, f_new_line + value), ""] # Tabulate non-zero indices. if optimise_parameters["eliminate zeros"]: if name in sorted(name_map): for n in name_map[name]: if inv_name_map[n][1] and inv_name_map[n][1] in new_nzcs: i, cols = inv_name_map[n][1] if not i in used_nzcs: continue code += [f_comment("Array of non-zero columns")] value = f_list([f_int(c) for c in list(cols)]) name_col = f_component(f_nzcolumns(i), len(cols)) code += [f_decl(f_const_uint, name_col, value), ""] # Remove from list of columns. new_nzcs.remove(inv_name_map[n][1]) return code def _evaluate_basis_at_quadrature_points(psi_tables, gdim, element_data, form_prefix, num_vertices, num_cells): "Generate code for calling evaluate basis (derivatives) at quadrature points" # Prefetch formats to speed up code generation f_comment = format["comment"] f_declaration = format["declaration"] f_static_array = format["static array"] f_loop = format["generate loop"] f_eval_basis_decl = format["eval_basis_decl"] f_eval_basis_init = format["eval_basis_init"] f_eval_basis = format["eval_basis"] f_eval_basis_copy = format["eval_basis_copy"] f_eval_derivs_decl = format["eval_derivs_decl"] f_eval_derivs_init = format["eval_derivs_init"] f_eval_derivs = format["eval_derivs"] f_eval_derivs_copy = format["eval_derivs_copy"] code = [] # Extract prefixes for tables prefixes = sorted(set(table.split("_")[0] for table in psi_tables)) # Use lower case prefix for form name form_prefix = form_prefix.lower() # The psi_tables used by the quadrature code are for scalar # components of specific derivatives, while tabulate_basis_all and # tabulate_basis_derivatives_all return data including all # possible components and derivatives. We therefore need to # iterate over prefixes (= elements), call tabulate_basis_all or # tabulate_basis_derivatives all, and then extract the relevant # data and fill in the psi_tables. 
We therefore need to extract # for each prefix, which tables need to be filled in. # For each unique prefix, check which derivatives and components are used used_derivatives_and_components = {} for prefix in prefixes: used_derivatives_and_components[prefix] = {} for table in psi_tables: if not prefix in table: continue # Check for derivative if "_D" in table: d = table.split("_D")[1].split("_")[0] n = sum([int(_d) for _d in d]) # FIXME: Assume at most 9 derivatives... else: n = 0 # Check for component if "_C" in table: c = table.split("_C")[1].split("_")[0] else: c = None # Note that derivative has been used if not n in used_derivatives_and_components[prefix]: used_derivatives_and_components[prefix][n] = set() used_derivatives_and_components[prefix][n].add(c) # Generate code for setting quadrature weights code += [f_comment("Set quadrature weights")] code += [f_declaration("const double*", "W", "quadrature_weights")] code += [""] # Generate code for calling evaluate_basis_[derivatives_]all for prefix in prefixes: # Get element data for current element counter = int(prefix.split("FE")[1]) element_number = element_data[counter]["element_number"] space_dim = element_data[counter]["num_element_dofs"] value_size = element_data[counter]["value_size"] # Iterate over derivative orders for n, components in sorted_by_key(used_derivatives_and_components[prefix]): # components are a set and need to be sorted components = sorted(components) # Code for evaluate_basis_all (n = 0 which means it's not a derivative) if n == 0: code += [f_comment("--- Evaluation of basis functions ---")] code += [""] # Compute variables for code generation eval_stride = value_size eval_size = space_dim*eval_stride table_size = num_cells*space_dim # Iterate over components and initialize tables for c in components: # Set name of table if c is None: table_name = prefix else: table_name = prefix + "_C%s" % c # Generate code for declaration of table code += [f_comment("Create table %s for basis function 
values on all cells" % table_name)] code += [f_eval_basis_decl % {"table_name": table_name}] code += [f_eval_basis_init % {"table_name": table_name, "table_size": table_size}] code += [""] # Iterate over cells in macro element and evaluate basis for cell_number in range(num_cells): # Compute variables for code generation eval_name = "%s_values_%d" % (prefix, cell_number) table_offset = cell_number*space_dim vertex_offset = cell_number*num_vertices*gdim # Generate block of code for loop block = [] # Generate code for calling evaluate_basis_all block += [f_eval_basis % {"form_prefix": form_prefix, "element_number": element_number, "eval_name": eval_name, "gdim": gdim, "vertex_offset": vertex_offset}] # Iterate over components and extract values for c in components: # Set name of table and component offset if c is None: table_name = prefix eval_offset = 0 else: table_name = prefix + "_C%s" % c eval_offset = int(c) # Generate code for copying values block += [""] block += [f_eval_basis_copy % {"table_name": table_name, "eval_name": eval_name, "eval_stride": eval_stride, "eval_offset": eval_offset, "space_dim": space_dim, "table_offset": table_offset}] # Generate code code += [f_comment("Evaluate basis functions on cell %d" % cell_number)] code += [f_static_array("double", eval_name, eval_size)] code += f_loop(block, [("ip", 0, "num_quadrature_points")]) code += [""] # Code for evaluate_basis_derivatives_all (derivative of degree n > 0) else: code += [f_comment("--- Evaluation of basis function derivatives of order %d ---" % n) ] code += [""] # FIXME: We extract values for all possible derivatives, even # FIXME: if not all are used. (For components, we extract only # FIXME: components that are actually used.) This may be optimized # FIXME: but the extra cost is likely small. 
# Get derivative tuples __, deriv_tuples = compute_derivative_tuples(n, gdim) # Generate names for derivatives derivs = ["".join(str(_d) for _d in d) for d in deriv_tuples] # Compute variables for code generation eval_stride = value_size*len(derivs) eval_size = space_dim*eval_stride table_size = num_cells*space_dim # Iterate over derivatives and initialize tables seen_derivs = set() for d in derivs: # Skip derivative if seen before (d^2/dxdy = d^2/dydx) if d in seen_derivs: continue seen_derivs.add(d) # Iterate over components for c in components: # Set name of table if c is None: table_name = prefix + "_D%s" % d else: table_name = prefix + "_C%s_D%s" % (c, d) # Generate code for declaration of table code += [f_comment("Create table %s for basis function derivatives on all cells" % table_name)] code += [(f_eval_derivs_decl % {"table_name": table_name})] code += [(f_eval_derivs_init % {"table_name": table_name, "table_size": table_size})] code += [""] # Iterate over cells (in macro element) for cell_number in range(num_cells): # Compute variables for code generation eval_name = "%s_dvalues_%d_%d" % (prefix, n, cell_number) table_offset = cell_number*space_dim vertex_offset = cell_number*num_vertices*gdim # Generate block of code for loop block = [] # Generate code for calling evaluate_basis_derivatives_all block += [f_eval_derivs % {"form_prefix": form_prefix, "element_number": element_number, "eval_name": eval_name, "gdim": gdim, "vertex_offset": vertex_offset, "n": n}] # Iterate over derivatives and extract values seen_derivs = set() for i, d in enumerate(derivs): # Skip derivative if seen before (d^2/dxdy = d^2/dydx) if d in seen_derivs: continue seen_derivs.add(d) # Iterate over components for c in components: # Set name of table and component offset if c is None: table_name = prefix + "_D%s" % d eval_offset = i else: table_name = prefix + "_C%s_D%s" % (c, d) eval_offset = len(derivs)*int(c) + i # Generate code for copying values block += [""] block += 
[(f_eval_derivs_copy % {"table_name": table_name, "eval_name": eval_name, "eval_stride": eval_stride, "eval_offset": eval_offset, "space_dim": space_dim, "table_offset": table_offset})] # Generate code code += [f_comment("Evaluate basis function derivatives on cell %d" % cell_number)] code += [f_static_array("double", eval_name, eval_size)] code += f_loop(block, [("ip", 0, "num_quadrature_points")]) code += [""] # Add newline code += [""] return code ffc-1.6.0/ffc/quadrature/quadratureoptimization.py000066400000000000000000000251621255571034100223760ustar00rootroot00000000000000# Copyright (C) 2013 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Marie E. Rognes, 2013 # Modified by Martin Alnaes, 2013-2014 from ufl.utils.sorting import sorted_by_key # FFC modules from ffc.log import info, error, warning from ffc.cpp import format from ffc.quadrature.symbolics import optimise_code, BASIS, IP, GEO, CONST from ffc.quadrature.symbolics import create_product, create_sum, create_symbol, create_fraction def optimize_integral_ir(ir, parameters): "Compute optimized intermediate representation of integral." 
# FIXME: input argument "parameters" has been added to optimize_integral_ir # FIXME: which shadows a local parameter # Get integral type and optimization parameters integral_type = ir["integral_type"] parameters = ir["optimise_parameters"] # Check whether we should optimize if parameters["optimisation"]: # Get parameters integrals = ir["trans_integrals"] integral_type = ir["integral_type"] num_facets = ir["num_facets"] num_vertices = ir["num_vertices"] geo_consts = ir["geo_consts"] psi_tables_map = ir["psi_tables_map"] # Optimize based on integral type if integral_type == "cell": info("Optimising expressions for cell integral") if parameters["optimisation"] in ("precompute_ip_const", "precompute_basis_const"): _precompute_expressions(integrals, geo_consts, parameters["optimisation"]) else: _simplify_expression(integrals, geo_consts, psi_tables_map) elif integral_type == "exterior_facet": for i in range(num_facets): info("Optimising expressions for facet integral %d" % i) if parameters["optimisation"] in ("precompute_ip_const", "precompute_basis_const"): _precompute_expressions(integrals[i], geo_consts, parameters["optimisation"]) else: _simplify_expression(integrals[i], geo_consts, psi_tables_map) elif integral_type == "interior_facet": for i in range(num_facets): for j in range(num_facets): info("Optimising expressions for facet integral (%d, %d)" % (i, j)) if parameters["optimisation"] in ("precompute_ip_const", "precompute_basis_const"): _precompute_expressions(integrals[i][j], geo_consts,parameters["optimisation"]) else: _simplify_expression(integrals[i][j], geo_consts, psi_tables_map) elif integral_type == "vertex": for i in range(num_vertices): info("Optimising expressions for poin integral %d" % i) if parameters["optimisation"] in ("precompute_ip_const", "precompute_basis_const"): _precompute_expressions(integrals[i], geo_consts, parameters["optimisation"]) else: _simplify_expression(integrals[i], geo_consts, psi_tables_map) else: error("Unhandled domain 
type: " + str(integral_type)) return ir def _simplify_expression(integral, geo_consts, psi_tables_map): for points, terms, functions, ip_consts, coordinate, conditionals in integral: # NOTE: sorted is needed to pass the regression tests on the buildbots # but it might be inefficient for speed. # A solution could be to only compare the output of evaluating the # integral, not the header files. for loop, (data, entry_vals) in sorted_by_key(terms): t_set, u_weights, u_psi_tables, u_nzcs, basis_consts = data new_entry_vals = [] psi_tables = set() # NOTE: sorted is needed to pass the regression tests on the buildbots # but it might be inefficient for speed. # A solution could be to only compare the output of evaluating the # integral, not the header files. for entry, val, ops in sorted(entry_vals): value = optimise_code(val, ip_consts, geo_consts, t_set) # Check if value is zero if value.val: new_entry_vals.append((entry, value, value.ops())) psi_tables.update(set([psi_tables_map[b] for b in value.get_unique_vars(BASIS)])) terms[loop][0][2] = psi_tables terms[loop][1] = new_entry_vals def _precompute_expressions(integral, geo_consts, optimisation): for points, terms, functions, ip_consts, coordinate, conditionals in integral: for loop, (data, entry_vals) in sorted_by_key(terms): t_set, u_weights, u_psi_tables, u_nzcs, basis_consts = data new_entry_vals = [] for entry, val, ops in entry_vals: value = _extract_variables(val, basis_consts, ip_consts, geo_consts, t_set, optimisation) # Check if value is zero if value.val: new_entry_vals.append((entry, value, value.ops())) terms[loop][1] = new_entry_vals def _extract_variables(val, basis_consts, ip_consts, geo_consts, t_set, optimisation): f_G = format["geometry constant"] f_I = format["ip constant"] f_B = format["basis constant"] if val._prec == 0: return val elif val._prec == 1: if val.base_expr is None: return val new_base = _extract_variables(val.base_expr, basis_consts, ip_consts, geo_consts, t_set, optimisation) 
new_sym = create_symbol(val.v, val.t, new_base, val.base_op) if new_sym.t == BASIS: return _reduce_expression(new_sym, [], basis_consts, f_B, True) elif new_sym.t == IP: return _reduce_expression(new_sym, [], ip_consts, f_I, True) elif new_sym.t == GEO: return _reduce_expression(new_sym, [], geo_consts, f_G, True) # First handle child classes of product and sum. elif val._prec in (2, 3): new_vars = [] for v in val.vrs: new_vars.append(_extract_variables(v, basis_consts, ip_consts, geo_consts, t_set, optimisation)) if val._prec == 2: new_val = create_product(new_vars) if val._prec == 3: new_val = create_sum(new_vars) elif val._prec == 4: num = _extract_variables(val.num, basis_consts, ip_consts, geo_consts, t_set, optimisation) denom = _extract_variables(val.denom, basis_consts, ip_consts, geo_consts, t_set, optimisation) return create_fraction(num, denom) else: error("Unknown symbolic type: %s" % repr(val)) # Sort variables of product and sum. b_c, i_c, g_c = [], [], [] for v in new_val.vrs: if v.t == BASIS: if optimisation == "precompute_basis_const": b_c.append(v) elif v.t == IP: i_c.append(v) else: g_c.append(v) vrs = new_val.vrs[:] for v in g_c + i_c + b_c: vrs.remove(v) i_c.extend(_reduce_expression(new_val, g_c, geo_consts, f_G)) vrs.extend(_reduce_expression(new_val, i_c, ip_consts, f_I)) vrs.extend(_reduce_expression(new_val, b_c, basis_consts, f_B)) # print "b_c: " # for b in b_c: # print b # print "basis" # for k,v in basis_consts.items(): # print "k: ", k # print "v: ", v # print "geo" # for k,v in geo_consts.items(): # print "k: ", k # print "v: ", v # print "ret val: ", val if len(vrs) > 1: if new_val._prec == 2: new_object = create_product(vrs) elif new_val._prec == 3: new_object = create_sum(vrs) else: error("Must have product or sum here: %s" % repr(new_val)) if new_object.t == BASIS: if optimisation == "precompute_ip_const": return new_object elif optimisation == "precompute_basis_const": return _reduce_expression(new_object, [], basis_consts, f_B, 
True) elif new_object.t == IP: return _reduce_expression(new_object, [], ip_consts, f_I, True) elif new_object.t == GEO: return _reduce_expression(new_object, [], geo_consts, f_G, True) return vrs[0] # if new_val._prec == 2: # if len(vrs) > 1: # new_prod = create_product(vrs) # if new_prod.t == BASIS: # if optimisation == "precompute_ip_const": # return new_prod # elif optimisation == "precompute_basis_const": # return _reduce_expression(new_prod, [], basis_consts, f_B, True) # elif new_prod.t == IP: # return _reduce_expression(new_prod, [], ip_consts, f_I, True) # elif new_prod.t == GEO: # return _reduce_expression(new_prod, [], geo_consts, f_G, True) # return vrs[0] # elif new_val._prec == 3: # if len(vrs) > 1: # new_sum = create_sum(vrs) # if new_sum.t == BASIS: # return new_sum ## return _reduce_expression(new_sum, [], basis_consts, f_B, True) # elif new_sum.t == IP: # return _reduce_expression(new_sum, [], ip_consts, f_I, True) # elif new_sum.t == GEO: # return _reduce_expression(new_sum, [], geo_consts, f_G, True) # return vrs[0] # else: # error("Must have product or sum here: %s" % repr(new_val)) def _reduce_expression(expr, symbols, const_dict, f_name, use_expr_type=False): if use_expr_type: if expr not in const_dict: const_dict[expr] = len(const_dict) return create_symbol(f_name(const_dict[expr]), expr.t) # Only something to be done if we have more than one symbol. if len(symbols) > 1: sym_type = symbols[0].t # Create new symbol. if expr._prec == 2: new_sym = create_product(symbols) elif expr._prec == 3: new_sym = create_sum(symbols) if new_sym not in const_dict: const_dict[new_sym] = len(const_dict) s = create_symbol(f_name(const_dict[new_sym]), sym_type) return [s] return symbols ffc-1.6.0/ffc/quadrature/quadraturerepresentation.py000066400000000000000000000235401255571034100227100ustar00rootroot00000000000000"Quadrature representation class for UFL" # Copyright (C) 2009-2015 Kristian B. Oelgaard # # This file is part of FFC. 
# # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Anders Logg 2009, 2014 # Modified by Martin Alnaes 2013-2015 # Python modules import numpy, itertools, collections # UFL modules from ufl.classes import Form, Integral from ufl.sorting import sorted_expr_sum # FFC modules from ffc.log import ffc_assert, info, error, warning from ffc.utils import product from ffc.fiatinterface import create_element from ffc.representationutils import initialize_integral_ir from ffc.quadrature.tabulate_basis import tabulate_basis from ffc.quadrature.parameters import parse_optimise_parameters from ffc.quadrature.quadraturetransformer import QuadratureTransformer from ffc.quadrature.optimisedquadraturetransformer import QuadratureTransformerOpt import six def compute_integral_ir(itg_data, form_data, form_id, element_numbers, parameters): "Compute intermediate represention of integral." info("Computing quadrature representation") # Initialise representation ir = initialize_integral_ir("quadrature", itg_data, form_data, form_id) # Create and save the optisation parameters. 
ir["optimise_parameters"] = parse_optimise_parameters(parameters, itg_data) # Sort integrals into a dict with quadrature degree and rule as key sorted_integrals = sort_integrals(itg_data.integrals, itg_data.metadata["quadrature_degree"], itg_data.metadata["quadrature_rule"]) # Tabulate quadrature points and basis function values in these points integrals_dict, psi_tables, quadrature_rules = \ tabulate_basis(sorted_integrals, form_data, itg_data) # Save tables for quadrature weights and points ir["quadrature_weights"] = quadrature_rules # TODO: Rename this ir entry to quadrature_rules # Create dimensions of primary indices, needed to reset the argument 'A' # given to tabulate_tensor() by the assembler. ir["prim_idims"] = [create_element(ufl_element).space_dimension() for ufl_element in form_data.argument_elements] # Select transformer if ir["optimise_parameters"]["optimisation"]: QuadratureTransformerClass = QuadratureTransformerOpt else: QuadratureTransformerClass = QuadratureTransformer # Create transformer transformer = QuadratureTransformerClass(psi_tables, quadrature_rules, itg_data.domain.geometric_dimension(), itg_data.domain.topological_dimension(), ir["entitytype"], form_data.function_replace_map, ir["optimise_parameters"]) # Transform integrals. cell = itg_data.domain.cell() ir["trans_integrals"] = _transform_integrals_by_type(ir, transformer, integrals_dict, itg_data.integral_type, cell) # Save tables populated by transformer ir["name_map"] = transformer.name_map ir["unique_tables"] = transformer.unique_tables # Basis values? # Save tables map, to extract table names for optimisation option -O. ir["psi_tables_map"] = transformer.psi_tables_map ir["additional_includes_set"] = transformer.additional_includes_set # Insert empty data which will be populated if optimization is turned on ir["geo_consts"] = {} # Extract element data for psi_tables, needed for runtime quadrature. # This is used by integral type custom_integral. 
ir["element_data"] = _extract_element_data(transformer.element_map, element_numbers) return ir def sort_integrals(integrals, default_quadrature_degree, default_quadrature_rule): """Sort and accumulate integrals according to the number of quadrature points needed per axis. All integrals should be over the same (sub)domain. """ if not integrals: return {} # Get domain properties from first integral, assuming all are the same integral_type = integrals[0].integral_type() subdomain_id = integrals[0].subdomain_id() domain_label = integrals[0].domain().label() domain = integrals[0].domain() # FIXME: Is this safe? Get as input? ffc_assert(all(integral_type == itg.integral_type() for itg in integrals), "Expecting only integrals of the same type.") ffc_assert(all(domain_label == itg.domain().label() for itg in integrals), "Expecting only integrals on the same domain.") ffc_assert(all(subdomain_id == itg.subdomain_id() for itg in integrals), "Expecting only integrals on the same subdomain.") sorted_integrands = collections.defaultdict(list) for integral in integrals: # Override default degree and rule if specified in integral metadata integral_metadata = integral.metadata() or {} degree = integral_metadata.get("quadrature_degree", default_quadrature_degree) rule = integral_metadata.get("quadrature_rule", default_quadrature_rule) assert isinstance(degree, int) # Add integrand to dictionary according to degree and rule. key = (degree, rule) sorted_integrands[key].append(integral.integrand()) # Create integrals from accumulated integrands. 
sorted_integrals = {} for key, integrands in list(sorted_integrands.items()): # Summing integrands in a canonical ordering defined by UFL integrand = sorted_expr_sum(integrands) sorted_integrals[key] = Integral(integrand, integral_type, domain, subdomain_id, {}, None) return sorted_integrals def _transform_integrals_by_type(ir, transformer, integrals_dict, integral_type, cell): num_facets = cell.num_facets() num_vertices = cell.num_vertices() if integral_type == "cell": # Compute transformed integrals. info("Transforming cell integral") transformer.update_cell() terms = _transform_integrals(transformer, integrals_dict, integral_type) elif integral_type == "exterior_facet": # Compute transformed integrals. terms = [None]*num_facets for i in range(num_facets): info("Transforming exterior facet integral %d" % i) transformer.update_facets(i, None) terms[i] = _transform_integrals(transformer, integrals_dict, integral_type) elif integral_type == "interior_facet": # Compute transformed integrals. terms = [[None]*num_facets for i in range(num_facets)] for i in range(num_facets): for j in range(num_facets): info("Transforming interior facet integral (%d, %d)" % (i, j)) transformer.update_facets(i, j) terms[i][j] = _transform_integrals(transformer, integrals_dict, integral_type) elif integral_type == "vertex": # Compute transformed integrals. 
terms = [None]*num_vertices for i in range(num_vertices): info("Transforming vertex integral (%d)" % i) transformer.update_vertex(i) terms[i] = _transform_integrals(transformer, integrals_dict, integral_type) elif integral_type == "custom": # Compute transformed integrals: same as for cell integrals info("Transforming custom integral") transformer.update_cell() terms = _transform_integrals(transformer, integrals_dict, integral_type) else: error("Unhandled domain type: " + str(integral_type)) return terms def _transform_integrals(transformer, integrals, integral_type): "Transform integrals from UFL expression to quadrature representation." transformed_integrals = [] for point, integral in sorted(integrals.items()): transformer.update_points(point) terms = transformer.generate_terms(integral.integrand(), integral_type) transformed_integrals.append((point, terms, transformer.function_data, {}, transformer.coordinate, transformer.conditionals)) return transformed_integrals def _extract_element_data(element_map, element_numbers): "Extract element data for psi_tables" # Iterate over map element_data = {} for elements in six.itervalues(element_map): for ufl_element, counter in six.iteritems(elements): # Create corresponding FIAT element fiat_element = create_element(ufl_element) # Compute value size value_size = product(ufl_element.value_shape()) # Get element number element_number = element_numbers.get(ufl_element) if element_number is None: # FIXME: Should not be necessary, we should always know the element number #warning("Missing element number, likely because vector elements are not yet supported in custom integrals.") pass # Store data element_data[counter] = {"value_size": value_size, "num_element_dofs": fiat_element.space_dimension(), "element_number": element_number} return element_data ffc-1.6.0/ffc/quadrature/quadraturetransformer.py000066400000000000000000001014721255571034100222110ustar00rootroot00000000000000"QuadratureTransformer for quadrature code 
generation to translate UFL expressions." # Copyright (C) 2009-2011 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Peter Brune 2009 # Modified by Anders Logg 2009, 2013 # Modified by Lizao Li, 2015 # Python modules. from numpy import shape from six import iteritems, iterkeys from six.moves import xrange as range from six import advance_iterator as next def firstkey(d): return next(iterkeys(d)) # UFL common. from ufl.common import product, StackDict, Stack from ufl.utils.sorting import sorted_by_key # UFL Classes. from ufl.classes import FixedIndex from ufl.classes import IntValue from ufl.classes import FloatValue from ufl.classes import Coefficient from ufl.classes import Operator # FFC modules. from ffc.log import info, debug, error, ffc_assert from ffc.cpp import format # Utility and optimisation functions for quadraturegenerator. from ffc.quadrature.quadraturetransformerbase import QuadratureTransformerBase from ffc.quadrature.quadratureutils import create_permutations from ffc.quadrature.reduce_operations import operation_count from ffc.quadrature.symbolics import IP class QuadratureTransformer(QuadratureTransformerBase): "Transform UFL representation to quadrature code." def __init__(self, *args): # Initialise base class. 
QuadratureTransformerBase.__init__(self, *args) # ------------------------------------------------------------------------- # Start handling UFL classes. # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- # AlgebraOperators (algebra.py). # ------------------------------------------------------------------------- def sum(self, o, *operands): #print("Visiting Sum: " + "\noperands: \n" + "\n".join(map(repr, operands))) # Prefetch formats to speed up code generation. f_group = format["grouping"] f_add = format["add"] f_mult = format["multiply"] f_float = format["floating point"] code = {} # Loop operands that has to be summed and sort according to map (j,k). for op in operands: # If entries does already exist we can add the code, otherwise just # dump them in the element tensor. for key, val in sorted_by_key(op): if key in code: code[key].append(val) else: code[key] = [val] # Add sums and group if necessary. 
for key, val in sorted_by_key(code): # Exclude all zero valued terms from sum value = [v for v in val if not v is None] if len(value) > 1: # NOTE: Since we no longer call expand_indices, the following # is needed to prevent the code from exploding for forms like # HyperElasticity duplications = {} for val in value: if val in duplications: duplications[val] += 1 continue duplications[val] = 1 # Add a product for each term that has duplicate code expressions = [] for expr, num_occur in sorted_by_key(duplications): if num_occur > 1: # Pre-multiply expression with number of occurrences expressions.append(f_mult([f_float(num_occur), expr])) continue # Just add expression if there is only one expressions.append(expr) ffc_assert(expressions, "Where did the expressions go?") if len(expressions) > 1: code[key] = f_group(f_add(expressions)) continue code[key] = expressions[0] else: # Check for zero valued sum and delete from code # This might result in returning an empty dict, but that should # be interpreted as zero by other handlers. if not value: del code[key] continue code[key] = value[0] return code def product(self, o, *operands): #print("Visiting Product with operands: \n" + "\n".join(map(repr,operands))) # Prefetch formats to speed up code generation. f_mult = format["multiply"] permute = [] not_permute = [] # Sort operands in objects that needs permutation and objects that does not. for op in operands: # If we get an empty dict, something was zero and so is the product. if not op: return {} if len(op) > 1 or (op and firstkey(op) != ()): permute.append(op) elif op and firstkey(op) == (): not_permute.append(op[()]) # Create permutations. #print("\npermute: " + repr(permute)) #print("\nnot_permute: " + repr(not_permute)) permutations = create_permutations(permute) #print("\npermutations: " + repr(permutations)) # Create code. code ={} if permutations: for key, val in sorted(permutations.items()): # Sort key in order to create a unique key. 
l = sorted(key) # Loop products, don't multiply by '1' and if we encounter a None the product is zero. # TODO: Need to find a way to remove and J_inv00 terms that might # disappear as a consequence of eliminating a zero valued term value = [] zero = False for v in val + not_permute: if v is None: ffc_assert(tuple(l) not in code, "This key should not be in the code.") code[tuple(l)] = None zero = True break elif not v: print("v: '%s'" % repr(v)) error("should not happen") elif v == "1": pass else: value.append(v) if not value: value = ["1"] if zero: code[tuple(l)] = None else: code[tuple(l)] = f_mult(value) else: # Loop products, don't multiply by '1' and if we encounter a None the product is zero. # TODO: Need to find a way to remove terms from 'used sets' that might # disappear as a consequence of eliminating a zero valued term value = [] for v in not_permute: if v is None: code[()] = None return code elif not v: print("v: '%s'" % repr(v)) error("should not happen") elif v == "1": pass else: value.append(v) # We did have values, but they might have been all ones. if value == [] and not_permute != []: code[()] = f_mult(["1"]) else: code[()] = f_mult(value) return code def division(self, o, *operands): #print("Visiting Division with operands: \n" + "\n".join(map(repr,operands))) # Prefetch formats to speed up code generation. f_div = format["div"] f_grouping = format["grouping"] ffc_assert(len(operands) == 2, \ "Expected exactly two operands (numerator and denominator): " + repr(operands)) # Get the code from the operands. numerator_code, denominator_code = operands # TODO: Are these safety checks needed? Need to check for None? ffc_assert(() in denominator_code and len(denominator_code) == 1, \ "Only support function type denominator: " + repr(denominator_code)) code = {} # Get denominator and create new values for the numerator. 
denominator = denominator_code[()] ffc_assert(denominator is not None, "Division by zero!") for key, val in numerator_code.items(): # If numerator is None the fraction is also None if val is None: code[key] = None # If denominator is '1', just return numerator elif denominator == "1": code[key] = val # Create fraction and add to code else: code[key] = f_div(val, f_grouping(denominator)) return code def power(self, o): #print("\n\nVisiting Power: " + repr(o)) # Get base and exponent. base, expo = o.ufl_operands # Visit base to get base code. base_code = self.visit(base) # TODO: Are these safety checks needed? Need to check for None? ffc_assert(() in base_code and len(base_code) == 1, "Only support function type base: " + repr(base_code)) # Get the base code. val = base_code[()] # Handle different exponents if isinstance(expo, IntValue): return {(): format["power"](val, expo.value())} elif isinstance(expo, FloatValue): return {(): format["std power"](val, format["floating point"](expo.value()))} elif isinstance(expo, (Coefficient, Operator)): exp = self.visit(expo) return {(): format["std power"](val, exp[()])} else: error("power does not support this exponent: " + repr(expo)) def abs(self, o, *operands): #print("\n\nVisiting Abs: " + repr(o) + "with operands: " + "\n".join(map(repr,operands))) # Prefetch formats to speed up code generation. f_abs = format["absolute value"] # TODO: Are these safety checks needed? Need to check for None? ffc_assert(len(operands) == 1 and () in operands[0] and len(operands[0]) == 1, \ "Abs expects one operand of function type: " + repr(operands)) # Take absolute value of operand. 
return {():f_abs(operands[0][()])} def min_value(self, o, *operands): f_min = format["min value"] return {():f_min(operands[0][()], operands[1][()])} def max_value(self, o, *operands): f_max = format["max value"] return {():f_max(operands[0][()], operands[1][()])} # ------------------------------------------------------------------------- # Condition, Conditional (conditional.py). # ------------------------------------------------------------------------- def not_condition(self, o, *operands): # This is a Condition but not a BinaryCondition, and the operand will be another Condition # Get condition expression and do safety checks. # Might be a bit too strict? cond, = operands ffc_assert(len(cond) == 1 and firstkey(cond) == (),\ "Condition for NotCondition should only be one function: " + repr(cond)) return {(): format["not"](cond[()])} def binary_condition(self, o, *operands): # Get LHS and RHS expressions and do safety checks. # Might be a bit too strict? lhs, rhs = operands ffc_assert(len(lhs) == 1 and firstkey(lhs) == (),\ "LHS of Condition should only be one function: " + repr(lhs)) ffc_assert(len(rhs) == 1 and firstkey(rhs) == (),\ "RHS of Condition should only be one function: " + repr(rhs)) # Map names from UFL to cpp.py. name_map = {"==":"is equal", "!=":"not equal",\ "<":"less than", ">":"greater than",\ "<=":"less equal", ">=":"greater equal",\ "&&":"and", "||": "or"} # Get values and test for None l_val = lhs[()] r_val = rhs[()] if l_val is None: l_val = format["float"](0.0) if r_val is None: r_val = format["float"](0.0) return {(): format["grouping"](l_val + format[name_map[o._name]] + r_val)} def conditional(self, o, *operands): # Get condition and return values; and do safety check. 
cond, true, false = operands ffc_assert(len(cond) == 1 and firstkey(cond) == (),\ "Condtion should only be one function: " + repr(cond)) ffc_assert(len(true) == 1 and firstkey(true) == (),\ "True value of Condtional should only be one function: " + repr(true)) ffc_assert(len(false) == 1 and firstkey(false) == (),\ "False value of Condtional should only be one function: " + repr(false)) # Get values and test for None t_val = true[()] f_val = false[()] if t_val is None: t_val = format["float"](0.0) if f_val is None: f_val = format["float"](0.0) # Create expression for conditional expr = format["evaluate conditional"](cond[()], t_val, f_val) num = len(self.conditionals) name = format["conditional"](num) if not expr in self.conditionals: self.conditionals[expr] = (IP, operation_count(expr, format), num) else: num = self.conditionals[expr][2] name = format["conditional"](num) return {():name} # ------------------------------------------------------------------------- # FacetNormal, CellVolume, Circumradius, FacetArea (geometry.py). 
# ------------------------------------------------------------------------- def cell_coordinate(self, o): # FIXME error("This object should be implemented by the child class.") def facet_coordinate(self, o): # FIXME error("This object should be implemented by the child class.") def cell_origin(self, o): # FIXME error("This object should be implemented by the child class.") def facet_origin(self, o): # FIXME error("This object should be implemented by the child class.") def cell_facet_origin(self, o): # FIXME error("This object should be implemented by the child class.") def jacobian(self, o): # FIXME error("This object should be implemented by the child class.") def jacobian_determinant(self, o): # FIXME error("This object should be implemented by the child class.") def jacobian_inverse(self, o): # FIXME error("This object should be implemented by the child class.") def facet_jacobian(self, o): # FIXME error("This object should be implemented by the child class.") def facet_jacobian_determinant(self, o): # FIXME error("This object should be implemented by the child class.") def facet_jacobian_inverse(self, o): # FIXME error("This object should be implemented by the child class.") def cell_facet_jacobian(self, o): # FIXME error("This object should be implemented by the child class.") def cell_facet_jacobian_determinant(self, o): # FIXME error("This object should be implemented by the child class.") def cell_facet_jacobian_inverse(self, o): # FIXME error("This object should be implemented by the child class.") def facet_normal(self, o): #print("Visiting FacetNormal:") # Get the component components = self.component() # Safety check. ffc_assert(len(components) == 1, "FacetNormal expects 1 component index: " + repr(components)) # Handle 1D as a special case. # FIXME: KBO: This has to change for mD elements in R^n : m < n if self.gdim == 1: # FIXME: MSA: UFL uses shape (1,) now, can we remove the special case here then? 
normal_component = format["normal component"](self.restriction, "") else: normal_component = format["normal component"](self.restriction, components[0]) self.trans_set.add(normal_component) return {():normal_component} def cell_normal(self, o): # FIXME error("This object should be implemented by the child class.") def cell_volume(self, o): # FIXME: KBO: This has to change for higher order elements volume = format["cell volume"](self.restriction) self.trans_set.add(volume) return {():volume} def circumradius(self, o): # FIXME: KBO: This has to change for higher order elements circumradius = format["circumradius"](self.restriction) self.trans_set.add(circumradius) return {():circumradius} def facet_area(self, o): # FIXME: KBO: This has to change for higher order elements # NOTE: Omitting restriction because the area of a facet is the same # on both sides. # FIXME: Since we use the scale factor, facet area has no meaning # for cell integrals. (Need check in FFC or UFL). area = format["facet area"] self.trans_set.add(area) return {():area} def min_facet_edge_length(self, o): # FIXME: this has no meaning for cell integrals. (Need check in FFC or UFL). tdim = self.tdim # FIXME: o.domain().topological_dimension() if tdim < 3: return self.facet_area(o) edgelen = format["min facet edge length"](self.restriction) self.trans_set.add(edgelen) return {():edgelen} def max_facet_edge_length(self, o): # FIXME: this has no meaning for cell integrals. (Need check in FFC or UFL). 
tdim = self.tdim # FIXME: o.domain().topological_dimension() if tdim < 3: return self.facet_area(o) edgelen = format["max facet edge length"](self.restriction) self.trans_set.add(edgelen) return {():edgelen} def cell_orientation(self, o): # FIXME error("This object should be implemented by the child class.") def quadrature_weight(self, o): # FIXME error("This object should be implemented by the child class.") # ------------------------------------------------------------------------- def create_argument(self, ufl_argument, derivatives, component, local_comp, local_offset, ffc_element, transformation, multiindices, tdim, gdim, avg): "Create code for basis functions, and update relevant tables of used basis." # Prefetch formats to speed up code generation. f_group = format["grouping"] f_add = format["add"] f_mult = format["multiply"] f_transform = format["transform"] f_detJ = format["det(J)"] f_inv = format["inverse"] # Reset code code = {} # Handle affine mappings. if transformation == "affine": # Loop derivatives and get multi indices. for multi in multiindices: deriv = [multi.count(i) for i in range(tdim)] if not any(deriv): deriv = [] # Create mapping and basis name. #print "component = ", component mapping, basis = self._create_mapping_basis(component, deriv, avg, ufl_argument, ffc_element) if not mapping in code: code[mapping] = [] if basis is not None: # Add transformation code[mapping].append(self.__apply_transform(basis, derivatives, multi, tdim, gdim)) # Handle non-affine mappings. else: ffc_assert(avg is None, "Taking average is not supported for non-affine mappings.") # Loop derivatives and get multi indices. for multi in multiindices: deriv = [multi.count(i) for i in range(tdim)] if not any(deriv): deriv = [] if 'piola' in transformation: for c in range(tdim): # Create mapping and basis name. 
mapping, basis = self._create_mapping_basis(c + local_offset, deriv, avg, ufl_argument, ffc_element) if not mapping in code: code[mapping] = [] if basis is not None: # Multiply basis by appropriate transform. if transformation == "covariant piola": dxdX = f_transform("JINV", c, local_comp, tdim, gdim, self.restriction) self.trans_set.add(dxdX) basis = f_mult([dxdX, basis]) elif transformation == "contravariant piola": self.trans_set.add(f_detJ(self.restriction)) detJ = f_inv(f_detJ(self.restriction)) dXdx = f_transform("J", local_comp, c, gdim, tdim, self.restriction) self.trans_set.add(dXdx) basis = f_mult([detJ, dXdx, basis]) # Add transformation if needed. code[mapping].append(self.__apply_transform(basis, derivatives, multi, tdim, gdim)) elif transformation == "pullback as metric": # g_ij = (Jinv)_ki G_kl (Jinv)lj i = local_comp // tdim j = local_comp % tdim for k in range(tdim): for l in range(tdim): # Create mapping and basis name. mapping, basis = self._create_mapping_basis( k * tdim + l + local_offset, deriv, avg, ufl_argument, ffc_element) if not mapping in code: code[mapping] = [] if basis is not None: J1 = f_transform("JINV", k, i, tdim, gdim, self.restriction) J2 = f_transform("JINV", l, j, tdim, gdim, self.restriction) self.trans_set.add(J1) self.trans_set.add(J2) basis = f_mult([J1, basis, J2]) # Add transformation if needed. code[mapping].append( self.__apply_transform( basis, derivatives, multi, tdim, gdim)) else: error("Transformation is not supported: " + repr(transformation)) # Add sums and group if necessary. for key, val in list(code.items()): if len(val) > 1: code[key] = f_group(f_add(val)) elif val: code[key] = val[0] else: # Return a None (zero) because val == [] code[key] = None return code def create_function(self, ufl_function, derivatives, component, local_comp, local_offset, ffc_element, is_quad_element, transformation, multiindices, tdim, gdim, avg): "Create code for basis functions, and update relevant tables of used basis." 
ffc_assert(ufl_function in self._function_replace_values, "Expecting ufl_function to have been mapped prior to this call.") # Prefetch formats to speed up code generation. f_mult = format["multiply"] f_transform = format["transform"] f_detJ = format["det(J)"] f_inv = format["inverse"] # Reset code code = [] # Handle affine mappings. if transformation == "affine": # Loop derivatives and get multi indices. for multi in multiindices: deriv = [multi.count(i) for i in range(tdim)] if not any(deriv): deriv = [] # Create function name. function_name = self._create_function_name(component, deriv, avg, is_quad_element, ufl_function, ffc_element) if function_name: # Add transformation if needed. code.append(self.__apply_transform(function_name, derivatives, multi, tdim, gdim)) # Handle non-affine mappings. else: ffc_assert(avg is None, "Taking average is not supported for non-affine mappings.") # Loop derivatives and get multi indices. for multi in multiindices: deriv = [multi.count(i) for i in range(tdim)] if not any(deriv): deriv = [] if 'piola' in transformation: # Vectors for c in range(tdim): function_name = self._create_function_name(c + local_offset, deriv, avg, is_quad_element, ufl_function, ffc_element) if function_name: # Multiply basis by appropriate transform. if transformation == "covariant piola": dxdX = f_transform("JINV", c, local_comp, tdim, gdim, self.restriction) self.trans_set.add(dxdX) function_name = f_mult([dxdX, function_name]) elif transformation == "contravariant piola": self.trans_set.add(f_detJ(self.restriction)) detJ = f_inv(f_detJ(self.restriction)) dXdx = f_transform("J", local_comp, c, gdim, tdim, self.restriction) self.trans_set.add(dXdx) function_name = f_mult([detJ, dXdx, function_name]) else: error("Transformation is not supported: ", repr(transformation)) # Add transformation if needed. 
code.append(self.__apply_transform(function_name, derivatives, multi, tdim, gdim)) elif transformation == "pullback as metric": # g_ij = (Jinv)_ki G_kl (Jinv)lj i = local_comp // tdim j = local_comp % tdim for k in range(tdim): for l in range(tdim): # Create mapping and basis name. function_name = self._create_function_name(k * tdim + l + local_offset, deriv, avg, is_quad_element, ufl_function, ffc_element) J1 = f_transform("JINV", k, i, tdim, gdim, self.restriction) J2 = f_transform("JINV", l, j, tdim, gdim, self.restriction) self.trans_set.add(J1) self.trans_set.add(J2) function_name = f_mult([J1, function_name, J2]) # Add transformation if needed. code.append(self.__apply_transform(function_name, derivatives, multi, tdim, gdim)) else: error("Transformation is not supported: " + repr(transformation)) if not code: return None elif len(code) > 1: code = format["grouping"](format["add"](code)) else: code = code[0] return code # ------------------------------------------------------------------------- # Helper functions for Argument and Coefficient # ------------------------------------------------------------------------- def __apply_transform(self, function, derivatives, multi, tdim, gdim): # XXX UFLACS REUSE "Apply transformation (from derivatives) to basis or function." f_mult = format["multiply"] f_transform = format["transform"] # Add transformation if needed. transforms = [] if self.integral_type == "custom": for i, direction in enumerate(derivatives): # Custom integrals to not need transforms, so in place # of the transform, we insert an identity matrix ref = multi[i] if ref != direction: transforms.append(0) else: for i, direction in enumerate(derivatives): ref = multi[i] t = f_transform("JINV", ref, direction, tdim, gdim, self.restriction) self.trans_set.add(t) transforms.append(t) # Only multiply by basis if it is present. 
if function: prods = transforms + [function] else: prods = transforms return format["multiply"](prods) # ------------------------------------------------------------------------- # Helper functions for transformation of UFL objects in base class # ------------------------------------------------------------------------- def _create_symbol(self, symbol, domain): return {():symbol} def _create_product(self, symbols): return format["multiply"](symbols) def _format_scalar_value(self, value): #print("format_scalar_value: %d" % value) if value is None: return {():None} # TODO: Handle value < 0 better such that we don't have + -2 in the code. return {():format["floating point"](value)} def _math_function(self, operands, format_function): # TODO: Are these safety checks needed? ffc_assert(len(operands) == 1 and () in operands[0] and len(operands[0]) == 1, \ "MathFunctions expect one operand of function type: " + repr(operands)) # Use format function on value of operand. new_operand = {} operand = operands[0] for key, val in operand.items(): new_operand[key] = format_function(val) return new_operand def _atan_2_function(self, operands, format_function): x1, x2 = operands x1, x2 = sorted(x1.values())[0], sorted(x2.values())[0] if x1 is None: x1 = format["floating point"](0.0) if x2 is None: x2 = format["floating point"](0.0) return {():format_function(x1, x2)} def _bessel_function(self, operands, format_function): # TODO: Are these safety checks needed? 
ffc_assert(len(operands) == 2,\ "BesselFunctions expect two operands of function type: " + repr(operands)) nu, x = operands ffc_assert(len(nu) == 1 and () in nu,\ "Expecting one operand of function type as first argument to BesselFunction : " + repr(nu)) ffc_assert(len(x) == 1 and () in x,\ "Expecting one operand of function type as second argument to BesselFunction : " + repr(x)) nu = nu[()] x = x[()] if nu is None: nu = format["floating point"](0.0) if x is None: x = format["floating point"](0.0) # Use format function on arguments. # NOTE: Order of nu and x is reversed compared to the UFL and C++ # function calls because of how Symbol treats exponents. # this will change once quadrature optimisations has been cleaned up. return {():format_function(x, nu)} # ------------------------------------------------------------------------- # Helper functions for code_generation() # ------------------------------------------------------------------------- def _count_operations(self, expression): return operation_count(expression, format) def _create_entry_data(self, val, integral_type): # Multiply value by weight and determinant # Create weight and scale factor. weight = format["weight"](self.points) if self.points is None or self.points > 1: weight += format["component"]("", format["integration points"]) # Update sets of used variables. 
if integral_type in ("vertex", "custom"): trans_set = set() value = format["mul"]([val, weight]) else: f_scale_factor = format["scale factor"] trans_set = set([f_scale_factor]) value = format["mul"]([val, weight, f_scale_factor]) trans_set.update(self.trans_set) used_points = set([self.points]) ops = self._count_operations(value) used_psi_tables = set([v for k, v in self.psi_tables_map.items()]) return (value, ops, [trans_set, used_points, used_psi_tables]) ffc-1.6.0/ffc/quadrature/quadraturetransformerbase.py000066400000000000000000001477471255571034100230630ustar00rootroot00000000000000"""QuadratureTransformerBase, a common class for quadrature transformers to translate UFL expressions.""" # Copyright (C) 2009-2013 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Martin Alnaes, 2013 # Modified by Garth N. Wells, 2013 # Modified by Lizao Li, 2015 # # First added: 2009-10-13 # Last changed: 2015-03-20 # Python modules. from six.moves import zip from numpy import shape, array # UFL Classes. from ufl.classes import FixedIndex, Index from ufl.common import StackDict, Stack, product from ufl.permutation import build_component_numbering # UFL Algorithms. from ufl.algorithms import Transformer # FFC modules. 
from ffc.log import ffc_assert, error, info from ffc.fiatinterface import create_element, map_facet_points from ffc.mixedelement import MixedElement from ffc.cpp import format # FFC tensor modules. from ffc.tensor.multiindex import MultiIndex as FFCMultiIndex from ffc.representationutils import transform_component # Utility and optimisation functions for quadraturegenerator. from ffc.quadrature.quadratureutils import create_psi_tables from ffc.quadrature.symbolics import BASIS, IP, GEO, CONST class QuadratureTransformerBase(Transformer): "Transform UFL representation to quadrature code." def __init__(self, psi_tables, quad_weights, gdim, tdim, entity_type, function_replace_map, optimise_parameters): Transformer.__init__(self) # Save optimise_parameters, weights and fiat_elements_map. self.optimise_parameters = optimise_parameters # Map from original functions with possibly incomplete elements # to functions with properly completed elements self._function_replace_map = function_replace_map self._function_replace_values = set(function_replace_map.values()) # For assertions # Create containers and variables. self.used_psi_tables = set() self.psi_tables_map = {} self.used_weights = set() self.quad_weights = quad_weights self.used_nzcs = set() self.ip_consts = {} self.trans_set = set() self.function_data = {} self.tdim = tdim self.gdim = gdim self.entity_type = entity_type self.points = 0 self.facet0 = None self.facet1 = None self.vertex = None self.restriction = None self.avg = None self.coordinate = None self.conditionals = {} self.additional_includes_set = set() self.__psi_tables = psi_tables # TODO: Unused? Remove? # Stacks. self._derivatives = [] self._index2value = StackDict() self._components = Stack() self.element_map, self.name_map, self.unique_tables =\ create_psi_tables(psi_tables, self.optimise_parameters["eliminate zeros"], self.entity_type) # Cache. 
self.argument_cache = {} self.function_cache = {} def update_cell(self): ffc_assert(self.entity_type == "cell", "Not expecting update_cell on a %s." % self.entity_type) self.facet0 = None self.facet1 = None self.vertex = None self.coordinate = None self.conditionals = {} def update_facets(self, facet0, facet1): ffc_assert(self.entity_type == "facet", "Not expecting update_facet on a %s." % self.entity_type) self.facet0 = facet0 self.facet1 = facet1 self.vertex = None self.coordinate = None self.conditionals = {} def update_vertex(self, vertex): ffc_assert(self.entity_type == "vertex", "Not expecting update_vertex on a %s." % self.entity_type) self.facet0 = None self.facet1 = None self.vertex = vertex self.coordinate = None self.conditionals = {} def update_points(self, points): self.points = points self.coordinate = None # Reset functions everytime we move to a new quadrature loop self.conditionals = {} self.function_data = {} # Reset cache self.argument_cache = {} self.function_cache = {} def disp(self): print("\n\n **** Displaying QuadratureTransformer ****") print("\nQuadratureTransformer, element_map:\n", self.element_map) print("\nQuadratureTransformer, name_map:\n", self.name_map) print("\nQuadratureTransformer, unique_tables:\n", self.unique_tables) print("\nQuadratureTransformer, used_psi_tables:\n", self.used_psi_tables) print("\nQuadratureTransformer, psi_tables_map:\n", self.psi_tables_map) print("\nQuadratureTransformer, used_weights:\n", self.used_weights) def component(self): "Return current component tuple." if len(self._components): return self._components.peek() return () def derivatives(self): "Return all derivatives tuple." if len(self._derivatives): return tuple(self._derivatives[:]) return () # ------------------------------------------------------------------------- # Start handling UFL classes. # ------------------------------------------------------------------------- # Nothing in expr.py is handled. Can only handle children of these clases. 
def expr(self, o): print("\n\nVisiting basic Expr:", repr(o), "with operands:") error("This expression is not handled: " + repr(o)) # Nothing in terminal.py is handled. Can only handle children of these clases. def terminal(self, o): print("\n\nVisiting basic Terminal:", repr(o), "with operands:") error("This terminal is not handled: " + repr(o)) # ------------------------------------------------------------------------- # Things which should not be here (after expansion etc.) from: # algebra.py, differentiation.py, finiteelement.py, # form.py, geometry.py, indexing.py, integral.py, tensoralgebra.py, variable.py. # ------------------------------------------------------------------------- def derivative(self, o, *operands): print("\n\nVisiting Derivative: ", repr(o)) error("All derivatives apart from Grad should have been expanded!!") def compound_tensor_operator(self, o): print("\n\nVisiting CompoundTensorOperator: ", repr(o)) error("CompoundTensorOperator should have been expanded.") def label(self, o): print("\n\nVisiting Label: ", repr(o)) error("What is a Lable doing in the integrand?") # ------------------------------------------------------------------------- # Things which are not supported yet, from: # condition.py, constantvalue.py, function.py, geometry.py, lifting.py, # mathfunctions.py, restriction.py # ------------------------------------------------------------------------- def condition(self, o): print("\n\nVisiting Condition:", repr(o)) error("This type of Condition is not supported (yet).") def constant_value(self, o): print("\n\nVisiting ConstantValue:", repr(o)) error("This type of ConstantValue is not supported (yet).") def geometric_quantity(self, o): print("\n\nVisiting GeometricQuantity:", repr(o)) error("This type of GeometricQuantity is not supported (yet).") def math_function(self, o): print("\n\nVisiting MathFunction:", repr(o)) error("This MathFunction is not supported (yet).") def atan_2_function(self, o): print("\n\nVisiting 
Atan2Function:", repr(o)) error("Atan2Function is not implemented (yet).") def bessel_function(self, o): print("\n\nVisiting BesselFunction:", repr(o)) error("BesselFunction is not implemented (yet).") def restricted(self, o): print("\n\nVisiting Restricted:", repr(o)) error("This type of Restricted is not supported (only positive and negative are currently supported).") # ------------------------------------------------------------------------- # Handlers that should be implemented by child classes. # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- # AlgebraOperators (algebra.py). # ------------------------------------------------------------------------- def sum(self, o, *operands): print("\n\nVisiting Sum: ", repr(o)) error("This object should be implemented by the child class.") def product(self, o, *operands): print("\n\nVisiting Product: ", repr(o)) error("This object should be implemented by the child class.") def division(self, o, *operands): print("\n\nVisiting Division: ", repr(o)) error("This object should be implemented by the child class.") def power(self, o): print("\n\nVisiting Power: ", repr(o)) error("This object should be implemented by the child class.") def abs(self, o, *operands): print("\n\nVisiting Abs: ", repr(o)) error("This object should be implemented by the child class.") # ------------------------------------------------------------------------- # FacetNormal, CellVolume, Circumradius (geometry.py). 
    # -------------------------------------------------------------------------
    # Abstract geometric-quantity handlers; each child representation
    # (quadrature / optimised quadrature) must provide concrete code.
    def cell_coordinate(self, o):
        error("This object should be implemented by the child class.")

    def facet_coordinate(self, o):
        error("This object should be implemented by the child class.")

    def cell_origin(self, o):
        error("This object should be implemented by the child class.")

    def facet_origin(self, o):
        error("This object should be implemented by the child class.")

    def cell_facet_origin(self, o):
        error("This object should be implemented by the child class.")

    def jacobian(self, o):
        error("This object should be implemented by the child class.")

    def jacobian_determinant(self, o):
        error("This object should be implemented by the child class.")

    def jacobian_inverse(self, o):
        error("This object should be implemented by the child class.")

    def facet_jacobian(self, o):
        error("This object should be implemented by the child class.")

    def facet_jacobian_determinant(self, o):
        error("This object should be implemented by the child class.")

    def facet_jacobian_inverse(self, o):
        error("This object should be implemented by the child class.")

    def cell_facet_jacobian(self, o):
        error("This object should be implemented by the child class.")

    def cell_facet_jacobian_determinant(self, o):
        error("This object should be implemented by the child class.")

    def cell_facet_jacobian_inverse(self, o):
        error("This object should be implemented by the child class.")

    def facet_normal(self, o):
        error("This object should be implemented by the child class.")

    def cell_normal(self, o):
        error("This object should be implemented by the child class.")

    def cell_volume(self, o):
        error("This object should be implemented by the child class.")

    def circumradius(self, o):
        error("This object should be implemented by the child class.")

    def facet_area(self, o):
        error("This object should be implemented by the child class.")

    def min_facet_edge_length(self, o):
        error("This object should be implemented by the child class.")

    def max_facet_edge_length(self, o):
        error("This object should be implemented by the child class.")

    def cell_orientation(self, o):
        error("This object should be implemented by the child class.")

    def quadrature_weight(self, o):
        error("This object should be implemented by the child class.")

    # -------------------------------------------------------------------------
    # Things that can be handled by the base class.
    # -------------------------------------------------------------------------
    # -------------------------------------------------------------------------
    # Argument (basisfunction.py).
    # -------------------------------------------------------------------------
    def argument(self, o):
        """Generate (and cache) code for a UFL Argument (test/trial function).

        The cache key includes the current component, derivatives,
        restriction and averaging state, since all of these change the
        generated basis-table access.
        """
        #print("\nVisiting Argument:" + repr(o))

        # Create aux. info.
        components = self.component()
        derivatives = self.derivatives()

        # Check if basis is already in cache
        key = (o, components, derivatives, self.restriction, self.avg)
        basis = self.argument_cache.get(key, None)

        tdim = self.tdim # FIXME: o.domain().topological_dimension() ???

        # FIXME: Why does using a code dict from cache make the expression manipulations blow (MemoryError) up later?
        # When the optimisation option is on, the cache is deliberately bypassed.
        if basis is None or self.optimise_parameters["optimisation"]:
            # Get auxiliary variables to generate basis
            (component, local_elem, local_comp, local_offset, ffc_element,
             transformation, multiindices) = self._get_auxiliary_variables(o, components, derivatives)

            # Create mapping and code for basis function and add to dict.
            basis = self.create_argument(o, derivatives, component, local_comp,
                                         local_offset, ffc_element, transformation,
                                         multiindices, tdim, self.gdim, self.avg)
            self.argument_cache[key] = basis

        return basis

    # -------------------------------------------------------------------------
    # Constant values (constantvalue.py).
    # -------------------------------------------------------------------------
    def identity(self, o):
        """Identity tensor entry: 1.0 on the diagonal, None (zero) elsewhere."""
        #print "\n\nVisiting Identity: ", repr(o)

        # Get components
        i, j = self.component()

        # Only return a value if i==j
        if i == j:
            return self._format_scalar_value(1.0)
        else:
            return self._format_scalar_value(None)

    def scalar_value(self, o):
        "ScalarValue covers IntValue and FloatValue"
        #print "\n\nVisiting ScalarValue: ", repr(o)
        return self._format_scalar_value(o.value())

    def zero(self, o):
        # None is the representation-level encoding of an exact zero.
        #print "\n\nVisiting Zero:", repr(o)
        return self._format_scalar_value(None)

    # -------------------------------------------------------------------------
    # Grad (differentiation.py).
    # -------------------------------------------------------------------------
    def grad(self, o):
        """Handle spatial gradient: push the derivative direction, visit the
        operand, then restore the derivative/component stacks."""
        #print("\n\nVisiting Grad: " + repr(o))

        # Get expression
        derivative_expr, = o.ufl_operands

        # Get components
        components = self.component()

        en = derivative_expr.rank()
        cn = len(components)
        ffc_assert(o.rank() == cn, "Expecting rank of grad expression to match components length.")

        # Get direction of derivative
        if cn == en+1:
            # Last component indexes the derivative direction; the rest index
            # into the operand.
            der = components[en]
            self._components.push(components[:en])
        elif cn == en:
            # This happens in 1D, slightly messy result of defining grad(f) == f.dx(0)
            der = 0
        else:
            error("Unexpected rank %d and component length %d in grad expression." % (en, cn))

        # Add direction to list of derivatives
        self._derivatives.append(der)

        # Visit children to generate the derivative code.
        code = self.visit(derivative_expr)

        # Remove the direction from list of derivatives
        self._derivatives.pop()
        if cn == en+1:
            self._components.pop()
        return code

    # -------------------------------------------------------------------------
    # Coefficient and Constants (function.py).
    # -------------------------------------------------------------------------
    def coefficient(self, o):
        """Generate (and cache) code for a UFL Coefficient.

        Mirrors argument(): the cache key captures component, derivatives,
        restriction and averaging; the optimisation option bypasses the cache.
        """
        #print("\nVisiting Coefficient: " + repr(o))

        # Map o to object with proper element and count
        o = self._function_replace_map[o]

        # Create aux. info.
        components = self.component()
        derivatives = self.derivatives()

        # Check if function is already in cache
        key = (o, components, derivatives, self.restriction, self.avg)
        function_code = self.function_cache.get(key)

        # FIXME: Why does using a code dict from cache make the expression manipulations blow (MemoryError) up later?
        if function_code is None or self.optimise_parameters["optimisation"]:
            # Get auxiliary variables to generate function
            (component, local_elem, local_comp, local_offset, ffc_element,
             transformation, multiindices) = self._get_auxiliary_variables(o, components, derivatives)

            # Check that we don't take derivatives of QuadratureElements.
            is_quad_element = local_elem.family() == "Quadrature"
            ffc_assert(not (derivatives and is_quad_element), \
                       "Derivatives of Quadrature elements are not supported: " + repr(o))

            tdim = self.tdim # FIXME: o.domain().topological_dimension() ???

            # Create code for function and add empty tuple to cache dict.
            function_code = {(): self.create_function(o, derivatives, component,
                                                      local_comp, local_offset, ffc_element,
                                                      is_quad_element, transformation, multiindices,
                                                      tdim, self.gdim, self.avg)}

            self.function_cache[key] = function_code

        return function_code

    def constant(self, o):
        """Scalar Constant: component 0, or 1 on the '-' restricted side."""
        #print("\n\nVisiting Constant: " + repr(o))

        # Map o to object with proper element and count
        o = self._function_replace_map[o]

        # Safety checks.
        ffc_assert(len(self.component()) == 0, "Constant does not expect component indices: " + repr(self._components))
        ffc_assert(o.shape() == (), "Constant should not have a value shape: " + repr(o.shape()))

        # Component default is 0
        component = 0

        # Handle restriction.
        if self.restriction == "-":
            component += 1

        # Let child class create constant symbol
        coefficient = format["coefficient"](o.count(), component)
        return self._create_symbol(coefficient, CONST)

    def vector_constant(self, o):
        """VectorConstant: one component index; '-' restriction offsets by the
        value-shape length."""
        #print("\n\nVisiting VectorConstant: " + repr(o))

        # Map o to object with proper element and count
        o = self._function_replace_map[o]

        # Get the component
        components = self.component()

        # Safety checks.
        ffc_assert(len(components) == 1, "VectorConstant expects 1 component index: " + repr(components))

        # We get one component.
        component = components[0]

        # Handle restriction.
        if self.restriction == "-":
            component += o.shape()[0]

        # Let child class create constant symbol
        coefficient = format["coefficient"](o.count(), component)
        return self._create_symbol(coefficient, CONST)

    def tensor_constant(self, o):
        """TensorConstant: map the multi-component through the element's
        sub-element mapping; '-' restriction offsets by the value size."""
        #print("\n\nVisiting TensorConstant: " + repr(o))

        # Map o to object with proper element and count
        o = self._function_replace_map[o]

        # Get the components
        components = self.component()

        # Safety checks.
        ffc_assert(len(components) == len(o.shape()), \
                   "The number of components '%s' must be equal to the number of shapes '%s' for TensorConstant." % (repr(components), repr(o.shape())))

        # Let the UFL element handle the component map.
        component = o.element()._sub_element_mapping[components]

        # Handle restriction (offset by value shape).
        if self.restriction == "-":
            component += product(o.shape())

        # Let child class create constant symbol
        coefficient = format["coefficient"](o.count(), component)
        return self._create_symbol(coefficient, CONST)

    # -------------------------------------------------------------------------
    # SpatialCoordinate (geometry.py).
    # -------------------------------------------------------------------------
    def spatial_coordinate(self, o):
        """Physical coordinate x[c] at the current quadrature point; requires
        the affine map tables and is not available for point measures."""
        #print "\n\nVisiting SpatialCoordinate:", repr(o)
        #print "\n\nVisiting SpatialCoordinate:", repr(operands)

        # Get the component.
        components = self.component()
        c, = components

        if self.vertex is not None:
            error("Spatial coordinates (x) not implemented for point measure (dP)")
            # TODO: Implement this, should be just the point.
        else:
            # Generate the appropriate coordinate and update tables.
            coordinate = format["ip coordinates"](self.points, c)
            self._generate_affine_map()
            return self._create_symbol(coordinate, IP)

    # -------------------------------------------------------------------------
    # Indexed (indexed.py).
    # -------------------------------------------------------------------------
    def indexed(self, o):
        """Push the resolved index tuple, visit the indexed expression, pop."""
        #print("\n\nVisiting Indexed:" + repr(o))

        # Get indexed expression and index, map index to current value
        # and update components
        indexed_expr, index = o.ufl_operands
        self._components.push(self.visit(index))

        # Visit expression subtrees and generate code.
        code = self.visit(indexed_expr)

        # Remove component again
        self._components.pop()

        return code

    # -------------------------------------------------------------------------
    # MultiIndex (indexing.py).
    # -------------------------------------------------------------------------
    def multi_index(self, o):
        """Resolve a MultiIndex to a tuple of concrete integer values using
        the current free-index assignment in self._index2value."""
        #print("\n\nVisiting MultiIndex:" + repr(o))

        # Loop all indices in MultiIndex and get current values
        subcomp = []
        for i in o:
            if isinstance(i, FixedIndex):
                subcomp.append(i._value)
            elif isinstance(i, Index):
                subcomp.append(self._index2value[i])

        return tuple(subcomp)

    # -------------------------------------------------------------------------
    # IndexSum (indexsum.py).
    # -------------------------------------------------------------------------
    def index_sum(self, o):
        """Expand an IndexSum by visiting the summand once per index value and
        combining the results with the representation's sum()."""
        #print("\n\nVisiting IndexSum: " + str(tree_format(o)))

        # Get expression and index that we're summing over
        summand, multiindex = o.ufl_operands
        index, = multiindex

        # Loop index range, update index/value dict and generate code
        ops = []
        for i in range(o.dimension()):
            self._index2value.push(index, i)
            ops.append(self.visit(summand))
            self._index2value.pop()

        # Call sum to generate summation
        code = self.sum(o, *ops)

        return code

    # -------------------------------------------------------------------------
    # MathFunctions (mathfunctions.py).
    # Each handler delegates to _math_function with the matching C name.
    # -------------------------------------------------------------------------
    def sqrt(self, o, *operands):
        #print("\n\nVisiting Sqrt: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
        return self._math_function(operands, format["sqrt"])

    def exp(self, o, *operands):
        #print("\n\nVisiting Exp: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
        return self._math_function(operands, format["exp"])

    def ln(self, o, *operands):
        #print("\n\nVisiting Ln: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
        return self._math_function(operands, format["ln"])

    def cos(self, o, *operands):
        #print("\n\nVisiting Cos: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
        return self._math_function(operands, format["cos"])

    def sin(self, o, *operands):
        #print("\n\nVisiting Sin: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
        return self._math_function(operands, format["sin"])

    def tan(self, o, *operands):
        #print("\n\nVisiting Tan: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
        return self._math_function(operands, format["tan"])

    def cosh(self, o, *operands):
        #print("\n\nVisiting Cosh: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
        return self._math_function(operands, format["cosh"])

    def sinh(self, o, *operands):
        #print("\n\nVisiting Sinh: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
        return self._math_function(operands, format["sinh"])

    def tanh(self, o, *operands):
        #print("\n\nVisiting Tanh: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
        return self._math_function(operands, format["tanh"])

    def acos(self, o, *operands):
        #print("\n\nVisiting Acos: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
        return self._math_function(operands, format["acos"])

    def asin(self, o, *operands):
        #print("\n\nVisiting Asin: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
        return self._math_function(operands, format["asin"])

    def atan(self, o, *operands):
        #print("\n\nVisiting Atan: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
        return self._math_function(operands, format["atan"])

    def atan_2(self, o, *operands):
        #print("\n\nVisiting Atan2: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
        # NOTE(review): the include string below looks truncated (no header
        # name after "#include ") - verify against the upstream source.
        self.additional_includes_set.add("#include ")
        return self._atan_2_function(operands, format["atan_2"])

    def erf(self, o, *operands):
        #print("\n\nVisiting Erf: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
        return self._math_function(operands, format["erf"])

    def bessel_i(self, o, *operands):
        #print("\n\nVisiting Bessel_I: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
        #self.additional_includes_set.add("#include ")
        # NOTE(review): include string looks truncated - verify upstream
        # (presumably a Boost.Math Bessel header).
        self.additional_includes_set.add("#include ")
        return self._bessel_function(operands, format["bessel_i"])

    def bessel_j(self, o, *operands):
        #print("\n\nVisiting Bessel_J: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
        #self.additional_includes_set.add("#include ")
        self.additional_includes_set.add("#include ")
        return self._bessel_function(operands, format["bessel_j"])

    def bessel_k(self, o, *operands):
        #print("\n\nVisiting Bessel_K: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
        #self.additional_includes_set.add("#include ")
        self.additional_includes_set.add("#include ")
        return self._bessel_function(operands, format["bessel_k"])

    def bessel_y(self, o, *operands):
        #print("\n\nVisiting Bessel_Y: " + repr(o) + "with operands: " + "\n".join(map(repr,operands)))
        #self.additional_includes_set.add("#include ")
        self.additional_includes_set.add("#include ")
        return self._bessel_function(operands, format["bessel_y"])

    # -------------------------------------------------------------------------
    # PositiveRestricted and NegativeRestricted (restriction.py).
    # -------------------------------------------------------------------------
    def positive_restricted(self, o):
        """Visit the operand with restriction set to '+'; nesting is illegal."""
        #print("\n\nVisiting PositiveRestricted: " + repr(o))

        # Just get the first operand, there should only be one.
        restricted_expr = o.ufl_operands
        ffc_assert(len(restricted_expr) == 1, "Only expected one operand for restriction: " + repr(restricted_expr))
        ffc_assert(self.restriction is None, "Expression is restricted twice: " + repr(restricted_expr))

        # Set restriction, visit operand and reset restriction
        self.restriction = "+"
        code = self.visit(restricted_expr[0])
        self.restriction = None

        return code

    def negative_restricted(self, o):
        """Visit the operand with restriction set to '-'; nesting is illegal."""
        #print("\n\nVisiting NegativeRestricted: " + repr(o))

        # Just get the first operand, there should only be one.
        restricted_expr = o.ufl_operands
        ffc_assert(len(restricted_expr) == 1, "Only expected one operand for restriction: " + repr(restricted_expr))
        ffc_assert(self.restriction is None, "Expression is restricted twice: " + repr(restricted_expr))

        # Set restriction, visit operand and reset restriction
        self.restriction = "-"
        code = self.visit(restricted_expr[0])
        self.restriction = None

        return code

    def cell_avg(self, o):
        """Visit the operand with the averaging marker set to 'cell'."""
        ffc_assert(self.avg is None, "Not expecting nested averages.")

        # Just get the first operand, there should only be one.
        expr, = o.ufl_operands

        # Set average marker, visit operand and reset marker
        self.avg = "cell"
        code = self.visit(expr)
        self.avg = None

        return code

    def facet_avg(self, o):
        """Visit the operand with the averaging marker set to 'facet'
        (only meaningful for facet integrals)."""
        ffc_assert(self.avg is None, "Not expecting nested averages.")
        ffc_assert(self.entity_type != "cell", "Cannot take facet_avg in a cell integral.")

        # Just get the first operand, there should only be one.
        expr, = o.ufl_operands

        # Set average marker, visit operand and reset marker
        self.avg = "facet"
        code = self.visit(expr)
        self.avg = None

        return code

    # -------------------------------------------------------------------------
    # ComponentTensor (tensors.py).
    # -------------------------------------------------------------------------
    def component_tensor(self, o):
        """Bind the current components to the tensor's free indices, then
        visit the scalar expression in an empty-component context."""
        #print("\n\nVisiting ComponentTensor:\n" + str(tree_format(o)))

        # Get expression and indices
        component_expr, indices = o.ufl_operands

        # Get current component(s)
        components = self.component()
        ffc_assert(len(components) == len(indices), \
                   "The number of known components must be equal to the number of components of the ComponentTensor for this to work.")

        # Update the index dict (map index values of current known indices to
        # those of the component tensor)
        for i, v in zip(indices._indices, components):
            self._index2value.push(i, v)

        # Push an empty component tuple
        self._components.push(())

        # Visit expression subtrees and generate code.
        code = self.visit(component_expr)

        # Remove the index map from the StackDict
        for i in range(len(components)):
            self._index2value.pop()

        # Remove the empty component tuple
        self._components.pop()

        return code

    def list_tensor(self, o):
        """Select the operand given by the first component and visit it with
        the remaining components."""
        #print("\n\nVisiting ListTensor: " + repr(o))

        # Get the component
        component = self.component()

        # Extract first and the rest of the components
        c0, c1 = component[0], component[1:]

        # Get first operand
        op = o.ufl_operands[c0]

        # Evaluate subtensor with this subcomponent
        self._components.push(c1)
        code = self.visit(op)
        self._components.pop()

        return code

    # -------------------------------------------------------------------------
    # Variable (variable.py).
    # -------------------------------------------------------------------------
    def variable(self, o):
        #print("\n\nVisiting Variable: " + repr(o))
        # Just get the expression associated with the variable
        return self.visit(o.expression())

    # -------------------------------------------------------------------------
    # Generate terms for representation.
    # -------------------------------------------------------------------------
    def generate_terms(self, integrand, integral_type):
        "Generate terms for code generation."
        # Set domain type
        self.integral_type = integral_type

        # Get terms: maps loop-key tuples to generated values (None == zero).
        terms = self.visit(integrand)

        # Get formatting: prefix used to recognise nonzero-column table names.
        f_nzc = format["nonzero columns"](0).split("0")[0]

        # Loop code and add weight and scale factor to value and sort after
        # loop ranges.
        new_terms = {}
        for key, val in sorted(terms.items()):
            # If value was zero continue.
            if val is None:
                continue
            # Create data.
            value, ops, sets = self._create_entry_data(val, integral_type)
            # Extract nzc columns if any and add to sets.
            used_nzcs = set([int(k[1].split(f_nzc)[1].split("[")[0]) for k in key if f_nzc in k[1]])
            sets.append(used_nzcs)
            # Create loop information and entry from key info and insert into dict.
            loop, entry = self._create_loop_entry(key, f_nzc)
            if not loop in new_terms:
                # NOTE(review): an empty dict is appended as an extra trailing
                # slot; the else-branch below only updates the first len(sets)
                # slots, so this placeholder is never merged into - presumably
                # intentional, but verify downstream consumers.
                sets.append({})
                new_terms[loop] = [sets, [(entry, value, ops)]]
            else:
                # Merge the per-term sets into the already stored ones.
                for i, s in enumerate(sets):
                    new_terms[loop][0][i].update(s)
                new_terms[loop][1].append((entry, value, ops))

        return new_terms

    def _create_loop_entry(self, key, f_nzc):
        """Build the (loop, entry) pair for one term key.

        key is a tuple of (index_number, entry_code, loop_range, space_dim)
        per argument; supports rank 0, 1 and 2 only. Returns the loop tuple
        of (loop_variable, start, range) triples and the tensor-entry code.
        """

        indices = {0: format["first free index"], 1: format["second free index"]}

        # Create appropriate entries.
        # FIXME: We only support rank 0, 1 and 2.
        entry = ""
        loop = ()
        if len(key) == 0:
            entry = "0"
        elif len(key) == 1:
            key = key[0]
            # Checking if the basis was a test function.
            # TODO: Make sure test function indices are always rearranged to 0.
            ffc_assert(key[0] == -2 or key[0] == 0, \
                       "Linear forms must be defined using test functions only: " + repr(key))
            index_j, entry, range_j, space_dim_j = key
            loop = ((indices[index_j], 0, range_j),)
            # A unit range of all-ones can be skipped entirely (unless the
            # entry maps through a nonzero-column table).
            if range_j == 1 and self.optimise_parameters["ignore ones"] and not (f_nzc in entry):
                loop = ()
        elif len(key) == 2:
            # Extract test and trial loops in correct order and check if for is legal.
            key0, key1 = (0, 0)
            for k in key:
                ffc_assert(k[0] in indices, \
                           "Bilinear forms must be defined using test and trial functions (index -2, -1, 0, 1): " + repr(k))
                if k[0] == -2 or k[0] == 0:
                    key0 = k
                else:
                    key1 = k
            index_j, entry_j, range_j, space_dim_j = key0
            index_k, entry_k, range_k, space_dim_k = key1

            loop = []
            if not (range_j == 1 and self.optimise_parameters["ignore ones"]) or f_nzc in entry_j:
                loop.append((indices[index_j], 0, range_j))
            if not (range_k == 1 and self.optimise_parameters["ignore ones"]) or f_nzc in entry_k:
                loop.append((indices[index_k], 0, range_k))
            # Row-major flattening: entry = entry_j*space_dim_k + entry_k.
            entry = format["add"]([format["mul"]([entry_j, str(space_dim_k)]), entry_k])
            loop = tuple(loop)
        else:
            error("Only rank 0, 1 and 2 tensors are currently supported: " + repr(key))

        # Generate the code line for the entry.
        # Try to evaluate entry ("3*6 + 2" --> "20").
        try:
            entry = str(eval(entry))
        except:
            # Best effort: non-constant entries stay symbolic.
            pass

        return loop, entry

    # -------------------------------------------------------------------------
    # Helper functions for transformation of UFL objects in base class
    # -------------------------------------------------------------------------
    def _create_symbol(self, symbol, domain):
        error("This function should be implemented by the child class.")

    def _create_product(self, symbols):
        error("This function should be implemented by the child class.")

    def _format_scalar_value(self, value):
        error("This function should be implemented by the child class.")

    def _math_function(self, operands, format_function):
        error("This function should be implemented by the child class.")

    def _atan_2_function(self, operands, format_function):
        error("This function should be implemented by the child class.")

    def _get_auxiliary_variables(self, ufl_function, component, derivatives):
        "Helper function for both Coefficient and Argument."

        # Get UFL element.
        ufl_element = ufl_function.element()

        # Get subelement and the relative (flattened) component (in case we have mixed elements).
        local_comp, local_elem = ufl_element.extract_component(component)

        # For basic tensor elements, local_comp should be flattened
        if len(local_comp) and len(local_elem.value_shape()) > 0:
            # Map component using component map from UFL. (TODO: inefficient use of this function)
            comp_map, _ = build_component_numbering(local_elem.value_shape(), local_elem.symmetry())
            local_comp = comp_map[local_comp]

        # Set local_comp to 0 if it is ()
        if not local_comp:
            local_comp = 0

        # Check that component != not () since the UFL component map will turn
        # it into 0, and () does not mean zeroth component in this context.
        if len(component):
            # Map component using component map from UFL. (TODO: inefficient use of this function)
            comp_map, comp_num = build_component_numbering(ufl_element.value_shape(), ufl_element.symmetry())
            component = comp_map[component]

            # Map physical components into reference components
            component, dummy = transform_component(component, 0, ufl_element)

            # Compute the local offset (needed for non-affine mappings).
            local_offset = component - local_comp
        else:
            # Compute the local offset (needed for non-affine mappings).
            local_offset = 0

        # Create FFC element.
        ffc_element = create_element(ufl_element)

        # Assuming that mappings for all basisfunctions are equal
        ffc_sub_element = create_element(local_elem)
        transformation = ffc_sub_element.mapping()[0]
        ffc_assert(all(transformation == mapping for mapping in ffc_sub_element.mapping()),
                   "Assuming subelement mappings are equal but they differ.")

        # Generate FFC multi index for derivatives.
        tdim = self.tdim # FIXME: ufl_element.domain().topological_dimension() ???
        multiindices = FFCMultiIndex([list(range(tdim))]*len(derivatives)).indices

        return (component, local_elem, local_comp, local_offset, ffc_element, transformation, multiindices)

    def _get_current_entity(self):
        """Return the mesh-entity number the current tables refer to,
        taking the active restriction into account for facet integrals."""
        if self.entity_type == "cell":
            # If we add macro cell integration, I guess the 'current cell number' would go here?
            return 0
        elif self.entity_type == "facet":
            # Handle restriction through facet.
            return {"+": self.facet0, "-": self.facet1, None: self.facet0}[self.restriction]
        elif self.entity_type == "vertex":
            return self.vertex
        else:
            error("Unknown entity type %s." % self.entity_type)

    def _create_mapping_basis(self, component, deriv, avg, ufl_argument, ffc_element):
        "Create basis name and mapping from given basis_info."

        # Get string for integration points.
        f_ip = "0" if (avg or self.points == 1) else format["integration points"]
        generate_psi_name = format["psi name"]

        # Only support test and trial functions.
        indices = {0: format["first free index"], 1: format["second free index"]}

        # Check that we have a basis function.
        ffc_assert(ufl_argument.number() in indices,
                   "Currently, Argument number must be either 0 or 1: " + repr(ufl_argument))
        # NOTE(review): error message typo "supporte" is part of the runtime
        # string and left unchanged here.
        ffc_assert(ufl_argument.part() is None,
                   "Currently, Argument part is not supporte: " + repr(ufl_argument))

        # Get element counter and loop index.
        element_counter = self.element_map[1 if avg else self.points][ufl_argument.element()]
        loop_index = indices[ufl_argument.number()]

        # Offset element space dimension in case of negative restriction,
        # need to use the complete element for offset in case of mixed element.
        space_dim = ffc_element.space_dimension()
        offset = {"+": "", "-": str(space_dim), None: ""}[self.restriction]

        # If we have a restricted function multiply space_dim by two.
        if self.restriction in ("+", "-"):
            space_dim *= 2

        # Create basis access, we never need to map the entry in the basis table
        # since we will either loop the entire space dimension or the non-zeros.
        if self.restriction in ("+", "-") and self.integral_type == "custom" and offset != "":
            # Special case access for custom integrals (all basis functions stored in flattened array)
            basis_access = format["component"]("", [f_ip, format["add"]([loop_index, offset])])
        else:
            # Normal basis function access
            basis_access = format["component"]("", [f_ip, loop_index])

        # Get current cell entity, with current restriction considered
        entity = self._get_current_entity()
        name = generate_psi_name(element_counter, self.entity_type, entity, component, deriv, avg)
        name, non_zeros, zeros, ones = self.name_map[name]
        loop_index_range = shape(self.unique_tables[name])[1]

        # If domain type is custom, then special-case set loop index
        # range since table is empty
        if self.integral_type == "custom":
            loop_index_range = ffc_element.space_dimension() # different from `space_dimension`...

        basis = ""
        # Ignore zeros if applicable
        if zeros and (self.optimise_parameters["ignore zero tables"] or self.optimise_parameters["remove zero terms"]):
            basis = self._format_scalar_value(None)[()]
        # If the loop index range is one we can look up the first component
        # in the psi array. If we only have ones we don't need the basis.
        elif self.optimise_parameters["ignore ones"] and loop_index_range == 1 and ones:
            loop_index = "0"
            basis = self._format_scalar_value(1.0)[()]
        else:
            # Add basis name to the psi tables map for later use.
            basis = self._create_symbol(name + basis_access, BASIS)[()]
            self.psi_tables_map[basis] = name

        # Create the correct mapping of the basis function into the local element tensor.
        basis_map = loop_index
        if non_zeros and basis_map == "0":
            basis_map = str(non_zeros[1][0])
        elif non_zeros:
            basis_map = format["component"](format["nonzero columns"](non_zeros[0]), basis_map)
        if offset:
            basis_map = format["grouping"](format["add"]([basis_map, offset]))

        # Try to evaluate basis map ("3 + 2" --> "5").
        try:
            basis_map = str(eval(basis_map))
        except:
            pass

        # Create mapping (index, map, loop_range, space_dim).
        # Example dx and ds: (0, j, 3, 3)
        # Example dS: (0, (j + 3), 3, 6), 6=2*space_dim
        # Example dS optimised: (0, (nz2[j] + 3), 2, 6), 6=2*space_dim
        mapping = ((ufl_argument.number(), basis_map, loop_index_range, space_dim),)

        return (mapping, basis)

    def _create_function_name(self, component, deriv, avg, is_quad_element, ufl_function, ffc_element):
        # (Method continues beyond this view.)
        ffc_assert(ufl_function in self._function_replace_values,
                   "Expecting ufl_function to have been mapped prior to this call.")

        # Get string for integration points.
        f_ip = "0" if (avg or self.points == 1) else format["integration points"]

        # Get the element counter.
element_counter = self.element_map[1 if avg else self.points][ufl_function.element()] # Get current cell entity, with current restriction considered entity = self._get_current_entity() # Set to hold used nonzero columns used_nzcs = set() # Create basis name and map to correct basis and get info. generate_psi_name = format["psi name"] psi_name = generate_psi_name(element_counter, self.entity_type, entity, component, deriv, avg) psi_name, non_zeros, zeros, ones = self.name_map[psi_name] # If all basis are zero we just return None. if zeros and self.optimise_parameters["ignore zero tables"]: return self._format_scalar_value(None)[()] # Get the index range of the loop index. loop_index_range = shape(self.unique_tables[psi_name])[1] # If domain type is custom, then special-case set loop index # range since table is empty if self.integral_type == "custom": loop_index_range = ffc_element.space_dimension() # Create loop index if loop_index_range > 1: # Pick first free index of secondary type # (could use primary indices, but it's better to avoid confusion). loop_index = format["free indices"][0] # If we have a quadrature element we can use the ip number to look # up the value directly. Need to add offset in case of components. if is_quad_element: quad_offset = 0 if component: # FIXME: Should we add a member function elements() to FiniteElement? if isinstance(ffc_element, MixedElement): for i in range(component): quad_offset += ffc_element.elements()[i].space_dimension() elif component != 1: error("Can't handle components different from 1 if we don't have a MixedElement.") else: quad_offset += ffc_element.space_dimension() if quad_offset: coefficient_access = format["add"]([f_ip, str(quad_offset)]) else: if non_zeros and f_ip == "0": # If we have non zero column mapping but only one value just pick it. 
# MSA: This should be an exact refactoring of the previous logic, # but I'm not sure if these lines were originally intended # here in the quad_element section, or what this even does: coefficient_access = str(non_zeros[1][0]) else: coefficient_access = f_ip elif non_zeros: if loop_index_range == 1: # If we have non zero column mapping but only one value just pick it. coefficient_access = str(non_zeros[1][0]) else: used_nzcs.add(non_zeros[0]) coefficient_access = format["component"](format["nonzero columns"](non_zeros[0]), loop_index) elif loop_index_range == 1: # If the loop index range is one we can look up the first component # in the coefficient array. coefficient_access = "0" else: # Or just set default coefficient access. coefficient_access = loop_index # Offset by element space dimension in case of negative restriction. offset = {"+": "", "-": str(ffc_element.space_dimension()), None: ""}[self.restriction] if offset: coefficient_access = format["add"]([coefficient_access, offset]) # Try to evaluate coefficient access ("3 + 2" --> "5"). try: coefficient_access = str(eval(coefficient_access)) C_ACCESS = GEO except: C_ACCESS = IP # Format coefficient access coefficient = format["coefficient"](str(ufl_function.count()), coefficient_access) # Build and cache some function data only if we need the basis # MSA: I don't understand the mix of loop index range check and ones check here, but that's how it was. if is_quad_element or (loop_index_range == 1 and ones and self.optimise_parameters["ignore ones"]): # If we only have ones or if we have a quadrature element we don't need the basis. function_symbol_name = coefficient F_ACCESS = C_ACCESS else: # Add basis name to set of used tables and add matrix access. # TODO: We should first add this table if the function is used later # in the expressions. 
If some term is multiplied by zero and it falls # away there is no need to compute the function value self.used_psi_tables.add(psi_name) # Create basis access, we never need to map the entry in the basis # table since we will either loop the entire space dimension or the # non-zeros. basis_index = "0" if loop_index_range == 1 else loop_index basis_access = format["component"]("", [f_ip, basis_index]) basis_name = psi_name + basis_access # Try to set access to the outermost possible loop if f_ip == "0" and basis_access == "0": B_ACCESS = GEO F_ACCESS = C_ACCESS else: B_ACCESS = IP F_ACCESS = IP # Format expression for function function_expr = self._create_product([self._create_symbol(basis_name, B_ACCESS)[()], self._create_symbol(coefficient, C_ACCESS)[()]]) # Check if the expression to compute the function value is already in # the dictionary of used function. If not, generate a new name and add. data = self.function_data.get(function_expr) if data is None: function_count = len(self.function_data) data = (function_count, loop_index_range, self._count_operations(function_expr), psi_name, used_nzcs, ufl_function.element()) self.function_data[function_expr] = data function_symbol_name = format["function value"](data[0]) # TODO: This access stuff was changed subtly during my refactoring, the # X_ACCESS vars is an attempt at making it right, make sure it is correct now! return self._create_symbol(function_symbol_name, F_ACCESS)[()] def _generate_affine_map(self): """Generate psi table for affine map, used by spatial coordinate to map integration point to physical element.""" # TODO: KBO: Perhaps it is better to create a fiat element and tabulate # the values at the integration points? 
f_FEA = format["affine map table"] f_ip = format["integration points"] affine_map = {1: lambda x: [1.0 - x[0], x[0]], 2: lambda x: [1.0 - x[0] - x[1], x[0], x[1]], 3: lambda x: [1.0 - x[0] - x[1] - x[2], x[0], x[1], x[2]]} num_ip = self.points w, points = self.quad_weights[num_ip] if self.facet0 is not None: points = map_facet_points(points, self.facet0) name = f_FEA(num_ip, self.facet0) elif self.vertex is not None: error("Spatial coordinates (x) not implemented for point measure (dP)") # TODO: Implement this, should be just the point. #name = f_FEA(num_ip, self.vertex) else: name = f_FEA(num_ip, 0) if name not in self.unique_tables: self.unique_tables[name] = array([affine_map[len(p)](p) for p in points]) if self.coordinate is None: ip = f_ip if num_ip > 1 else 0 r = None if self.facet1 is None else "+" self.coordinate = [name, self.gdim, ip, r] # ------------------------------------------------------------------------- # Helper functions for code_generation() # ------------------------------------------------------------------------- def _count_operations(self, expression): error("This function should be implemented by the child class.") def _create_entry_data(self, val): error("This function should be implemented by the child class.") ffc-1.6.0/ffc/quadrature/quadratureutils.py000066400000000000000000000400631255571034100210050ustar00rootroot00000000000000"Utility functions for quadrature representation." # Copyright (C) 2007-2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. 
# # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2007-03-16 # Last changed: 2015-03-27 # # Hacked by Marie E. Rognes 2013 # Modified by Anders Logg 2014 # Modified by Lizao Li 2015 # Python modules. import numpy # FFC modules. from ffc.log import debug, error, ffc_assert from ffc.cpp import format def create_psi_tables(tables, eliminate_zeros, entity_type): "Create names and maps for tables and non-zero entries if appropriate." debug("\nQG-utils, psi_tables:\n" + str(tables)) # Create element map {points:{element:number,},} # and a plain dictionary {name:values,}. element_map, flat_tables = flatten_psi_tables(tables, entity_type) debug("\nQG-utils, psi_tables, flat_tables:\n" + str(flat_tables)) # Reduce tables such that we only have those tables left with unique values # Create a name map for those tables that are redundant. name_map, unique_tables = unique_psi_tables(flat_tables, eliminate_zeros) debug("\nQG-utils, psi_tables, unique_tables:\n" + str(unique_tables)) debug("\nQG-utils, psi_tables, name_map:\n" + str(name_map)) return (element_map, name_map, unique_tables) def flatten_psi_tables(tables, entity_type): """Create a 'flat' dictionary of tables with unique names and a name map that maps number of quadrature points and element name to a unique element number. Input tables on the format for scalar and non-scalar elements respectively: tables[num_points][element][entity][derivs][ip][dof] tables[num_points][element][entity][derivs][ip][component][dof] Planning to change this into: tables[num_points][element][avg][entity][derivs][ip][dof] tables[num_points][element][avg][entity][derivs][ip][component][dof] Returns: element_map - { num_quad_points: {ufl_element: element_number} }. flat_tables - { unique_table_name: values[ip,dof] }. 
""" generate_psi_name = format["psi name"] def sorted_items(mapping, **sorted_args): return [(k, mapping[k]) for k in sorted(mapping.keys(), **sorted_args)] # Initialise return values and element counter. flat_tables = {} element_map = {} counter = 0 # There's a separate set of tables for each number of quadrature points for num_points, element_tables in sorted_items(tables): element_map[num_points] = {} # There's a set of tables for each element for element, avg_tables in sorted_items(element_tables, key=lambda x: str(x)): element_map[num_points][element] = counter # There's a set of tables for non-averaged and averaged (averaged only occurs with num_points == 1) for avg, entity_tables in sorted_items(avg_tables): # There's a set of tables for each entity number (only 1 for the cell, >1 for facets and vertices) for entity, derivs_tables in sorted_items(entity_tables): # There's a set of tables for each derivative combination for derivs, fiat_tables in sorted_items(derivs_tables): # Flatten fiat_table for tensor-valued basis # This is necessary for basic (non-tensor-product) # tensor elements if len(numpy.shape(fiat_tables)) > 3: shape = fiat_tables.shape value_shape = shape[1:-1] fiat_tables = fiat_tables.reshape((shape[0], numpy.product(value_shape), shape[-1])) # Transform fiat_tables to a list of tables on the form psi_table[dof][ip] for each scalar component if element.value_shape(): # Table is on the form fiat_tables[ip][component][dof]. transposed_table = numpy.transpose(fiat_tables, (1,2,0)) component_tables = list(enumerate(transposed_table)) #component_tables = [numpy.transpose(fiat_tables[:,i,:] for i in range(fiat_tables.shape[1]))] else: # Scalar element, table is on the form fiat_tables[ip][dof]. # Using () for the component because generate_psi_name expects that component_tables = [((), numpy.transpose(fiat_tables))] # Iterate over the innermost tables for each scalar component for component, psi_table in component_tables: # Generate the table name. 
name = generate_psi_name(counter, entity_type, entity, component, derivs, avg) # Verify shape of basis (can be omitted for speed if needed). #if not (num_points is None or (len(numpy.shape(psi_table)) == 2 and numpy.shape(psi_table)[0] == num_points)): # error("This table has the wrong shape: " + str(psi_table)) # Verify uniqueness of names if name in flat_tables: error("Table name is not unique, something is wrong:\n name = %s\n table = %s\n" % (name, flat_tables)) # Store table with unique name flat_tables[name] = psi_table # Increase unique numpoints*element counter counter += 1 return (element_map, flat_tables) def unique_psi_tables(tables, eliminate_zeros): """Returns a name map and a dictionary of unique tables. The function checks if values in the tables are equal, if this is the case it creates a name mapping. It also create additional information (depending on which parameters are set) such as if the table contains all ones, or only zeros, and a list on non-zero columns. unique_tables - {name:values,}. name_map - {original_name:[new_name, non-zero-columns (list), is zero (bool), is ones (bool)],}.""" # Get unique tables (from old table utility). name_map, inverse_name_map = unique_tables(tables) debug("\ntables: " + str(tables)) debug("\nname_map: " + str(name_map)) debug("\ninv_name_map: " + str(inverse_name_map)) # Set values to zero if they are lower than threshold. format_epsilon = format["epsilon"] for name in tables: # Get values. vals = tables[name] for r in range(numpy.shape(vals)[0]): for c in range(numpy.shape(vals)[1]): if abs(vals[r][c]) < format_epsilon: vals[r][c] = 0 tables[name] = vals # Extract the column numbers that are non-zero. # If optimisation option is set # counter for non-zero column arrays. i = 0 non_zero_columns = {} if eliminate_zeros: for name in sorted(tables.keys()): # Get values. vals = tables[name] # Skip if values are missing if len(vals) == 0: continue # Use the first row as reference. 
non_zeros = list(vals[0].nonzero()[0]) # If all columns in the first row are non zero, there's no point # in continuing. if len(non_zeros) == numpy.shape(vals)[1]: continue # If we only have one row (IP) we just need the nonzero columns. if numpy.shape(vals)[0] == 1: if list(non_zeros): non_zeros.sort() non_zero_columns[name] = (i, non_zeros) # Compress values. tables[name] = vals[:, non_zeros] i += 1 # Check if the remaining rows are nonzero in the same positions, else expand. else: for j in range(1, numpy.shape(vals)[0]): # All rows must have the same non-zero columns # for the optimization to work (at this stage). new_non_zeros = list(vals[j].nonzero()[0]) if non_zeros != new_non_zeros: non_zeros = non_zeros + [c for c in new_non_zeros if not c in non_zeros] # If this results in all columns being non-zero, continue. if len(non_zeros) == numpy.shape(vals)[1]: continue # Only add nonzeros if it results in a reduction of columns. if len(non_zeros) != numpy.shape(vals)[1]: if list(non_zeros): non_zeros.sort() non_zero_columns[name] = (i, non_zeros) # Compress values. tables[name] = vals[:, non_zeros] i += 1 # Check if we have some zeros in the tables. names_zeros = contains_zeros(tables) # Get names of tables with all ones. names_ones = get_ones(tables) # Add non-zero column, zero and ones info to inverse_name_map # (so we only need to pass around one name_map to code generating functions). for name in inverse_name_map: if inverse_name_map[name] in non_zero_columns: nzc = non_zero_columns[inverse_name_map[name]] zero = inverse_name_map[name] in names_zeros ones = inverse_name_map[name] in names_ones inverse_name_map[name] = [inverse_name_map[name], nzc, zero, ones] else: zero = inverse_name_map[name] in names_zeros ones = inverse_name_map[name] in names_ones inverse_name_map[name] = [inverse_name_map[name], (), zero, ones] # If we found non zero columns we might be able to reduce number of tables further. if non_zero_columns: # Try reducing the tables. 
This is possible if some tables have become # identical as a consequence of compressing the tables. # This happens with e.g., gradients of linear basis # FE0 = {-1,0,1}, nzc0 = [0,2] # FE1 = {-1,1,0}, nzc1 = [0,1] -> FE0 = {-1,1}, nzc0 = [0,2], nzc1 = [0,1]. # Call old utility function again. nm, inv_nm = unique_tables(tables) # Update name maps. for name in inverse_name_map: if inverse_name_map[name][0] in inv_nm: inverse_name_map[name][0] = inv_nm[inverse_name_map[name][0]] for name in nm: maps = nm[name] for m in maps: if not name in name_map: name_map[name] = [] if m in name_map: name_map[name] += name_map[m] + [m] del name_map[m] else: name_map[name].append(m) # Get new names of tables with all ones (for vector constants). names = get_ones(tables) # Because these tables now contain ones as a consequence of compression # we still need to consider the non-zero columns when looking up values # in coefficient arrays. The psi entries can however we neglected and we # don't need to tabulate the values (if option is set). for name in names: if name in name_map: maps = name_map[name] for m in maps: inverse_name_map[m][3] = True if name in inverse_name_map: inverse_name_map[name][3] = True # Write protect info and return values for name in inverse_name_map: inverse_name_map[name] = tuple(inverse_name_map[name]) # Note: inverse_name_map here is called name_map in create_psi_tables and the quadraturetransformerbase class return (inverse_name_map, tables) def unique_tables(tables): """Removes tables with redundant values and returns a name_map and a inverse_name_map. E.g., tables = {a:[0,1,2], b:[0,2,3], c:[0,1,2], d:[0,1,2]} results in: tables = {a:[0,1,2], b:[0,2,3]} name_map = {a:[c,d]} inverse_name_map = {a:a, b:b, c:a, d:a}.""" format_epsilon = format["epsilon"] name_map = {} inverse_name_map = {} names = sorted(tables.keys()) mapped = [] # Loop all tables to see if some are redundant. 
for i in range(len(names)): name0 = names[i] if name0 in mapped: continue val0 = numpy.array(tables[name0]) for j in range(i + 1, len(names)): name1 = names[j] if name1 in mapped: continue val1 = numpy.array(tables[name1]) # Check if dimensions match. if numpy.shape(val0) == numpy.shape(val1): # Check if values are the same. if len(val0) > 0 and abs(val0 - val1).max() < format_epsilon: mapped.append(name1) del tables[name1] if name0 in name_map: name_map[name0].append(name1) else: name_map[name0] = [name1] # Create inverse name map. inverse_name_map[name1] = name0 # Add self. for name in tables: if not name in inverse_name_map: inverse_name_map[name] = name return (name_map, inverse_name_map) def get_ones(tables): "Return names of tables for which all values are 1.0." f_epsilon = format["epsilon"] names = [] for name in tables: vals = tables[name] if len(vals) > 0 and abs(vals - numpy.ones(numpy.shape(vals))).max() < f_epsilon: names.append(name) return names def contains_zeros(tables): "Checks if any tables contains only zeros." f_epsilon = format["epsilon"] names = [] for name in tables: vals = tables[name] if len(vals) > 0 and abs(vals).max() < f_epsilon: names.append(name) return names def create_permutations(expr): # This is probably not used. if len(expr) == 0: return expr # Format keys and values to lists and tuples. if len(expr) == 1: new = {} for key, val in expr[0].items(): if key == (): pass elif not isinstance(key[0], tuple): key = (key,) if not isinstance(val, list): val = [val] new[key] = val return new # Create permutations of two lists. # TODO: there could be a cleverer way of changing types of keys and vals. 
if len(expr) == 2: new = {} for key0, val0 in expr[0].items(): if isinstance(key0[0], tuple): key0 = list(key0) if not isinstance(key0, list): key0 = [key0] if not isinstance(val0, list): val0 = [val0] for key1, val1 in expr[1].items(): if key1 == (): key1 = [] elif isinstance(key1[0], tuple): key1 = list(key1) if not isinstance(key1, list): key1 = [key1] if not isinstance(val1, list): val1 = [val1] ffc_assert(tuple(key0 + key1) not in new, "This is not supposed to happen.") new[tuple(key0 + key1)] = val0 + val1 return new # Create permutations by calling this function recursively. # This is only used for rank > 2 tensors I think. if len(expr) > 2: new = permutations(expr[0:2]) return permutations(new + expr[2:]) ffc-1.6.0/ffc/quadrature/reduce_operations.py000066400000000000000000000644141255571034100212670ustar00rootroot00000000000000"Some simple functions for manipulating expressions symbolically" # Copyright (C) 2008-2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . from ufl.utils.sorting import sorted_by_key # FFC modules from ffc.log import error from collections import deque def split_expression(expression, format, operator, allow_split = False): """Split the expression at the given operator, return list. Do not split () or [] unless told to split (). 
This is to enable easy count of double operations which can be in (), but in [] we only have integer operations.""" # Get formats access = format["component"]("", [""]) group = format["grouping"]("") la = access[0] ra = access[1] lg = group[0] rg = group[1] # Split with given operator prods = deque(expression.split(operator)) new_prods = [prods.popleft()] while prods: # Continue while we still have list of potential products # p is the first string in the product p = prods.popleft() # If the number of "[" and "]" doesn't add up in the last entry of the # new_prods list, add p and see if it helps for next iteration if new_prods[-1].count(la) != new_prods[-1].count(ra): new_prods[-1] = operator.join([new_prods[-1], p]) # If the number of "(" and ")" doesn't add up (and we didn't allow a split) # in the last entry of the new_prods list, add p and see if it helps for next iteration elif new_prods[-1].count(lg) != new_prods[-1].count(rg) and not allow_split: new_prods[-1] = operator.join([new_prods[-1], p]) # If everything was fine, we can start a new entry in the new_prods list else: new_prods.append(p) return new_prods def operation_count(expression, format): """This function returns the number of double operations in an expression. 
We do split () but not [] as we only have unsigned integer operations in [].""" # Note we do not subtract 1 for the additions, because there is also an # assignment involved adds = len(split_expression(expression, format, format["add"](["", ""]), True)) - 1 mults = len(split_expression(expression, format, format["multiply"](["", ""]), True)) - 1 return mults + adds def get_simple_variables(expression, format): """This function takes as argument an expression (preferably expanded): expression = "x*x + y*x + x*y*z" returns a list of products and a dictionary: prods = ["x*x", "y*x", "x*y*z"] variables = {variable: [num_occurences, [pos_in_prods]]} variables = {"x":[3, [0,1,2]], "y":[2, [1,2]], "z":[1, [2]]}""" # Get formats add = format["add"](["", ""]) mult = format["multiply"](["", ""]) group = format["grouping"]("") format_float = format["floating point"] prods = split_expression(expression, format, add) prods = [p for p in prods if p] variables = {} for i, p in enumerate(prods): # Only extract unique variables vrs = list(set( split_expression(p, format, mult) )) for v in vrs: # Try to convert variable to floats and back (so '2' == '2.0' etc.) 
try: v = format_float(float(v)) except: pass if v in variables: variables[v][0] += 1 variables[v][1].append(i) else: variables[v] = [1, [i]] return (prods, variables) def group_vars(expr, format): """Group variables in an expression, such that: "x + y + z + 2*y + 6*z" = "x + 3*y + 7*z" "x*x + x*x + 2*x + 3*x + 5" = "2.0*x*x + 5.0*x + 5" "x*y + y*x + 2*x*y + 3*x + 0*x + 5" = "5.0*x*y + 3.0*x + 5" "(y + z)*x + 5*(y + z)*x" = "6.0*(y + z)*x" "1/(x*x) + 2*1/(x*x) + std::sqrt(x) + 6*std::sqrt(x)" = "3*1/(x*x) + 7*std::sqrt(x)" """ # Get formats format_float = format["floating point"] add = format["add"](["", ""]) mult = format["multiply"](["", ""]) new_prods = {} # Get list of products prods = split_expression(expr, format, add) # Loop products and collect factors for p in prods: # Get list of variables, and do a basic sort vrs = split_expression(p, format, mult) factor = 1 new_var = [] # Try to multiply factor with variable, else variable must be multiplied by factor later # If we don't have a variable, set factor to zero and break for v in vrs: if v: try: f = float(v) factor *= f except: new_var.append(v) else: factor = 0 break # Create new variable that must be multiplied with factor. 
Add this # variable to dictionary, if it already exists add factor to other factors new_var.sort() new_var = mult.join(new_var) if new_var in new_prods: new_prods[new_var] += factor else: new_prods[new_var] = factor # Reset products prods = [] for prod, f in sorted_by_key(new_prods): # If we have a product append mult of both if prod: # If factor is 1.0 we don't need it if f == 1.0: prods.append(prod) else: prods.append(mult.join([format_float(f), prod])) # If we just have a factor elif f: prods.append(format_float(f)) prods.sort() return add.join(prods) def reduction_possible(variables): """Find the variable that occurs in the most products, if more variables occur the same number of times and in the same products add them to list.""" # Find the variable that appears in the most products max_val = 1 max_var = "" max_vars = [] for key, val in sorted_by_key(variables): if max_val < val[0]: max_val = val[0] max_var = key # If we found a variable that appears in products multiple times, check if # other variables appear in the exact same products if max_var: for key, val in sorted_by_key(variables): # Check if we have more variables in the same products if max_val == val[0] and variables[max_var][1] == val[1]: max_vars.append(key) return max_vars def is_constant(variable, format, constants = [], from_is_constant = False): """Determine if a variable is constant or not. The function accepts an optional list of variables (loop indices) that will be regarded as constants for the given variable. If none are supplied it is assumed that all array accesses will result in a non-constant variable. 
v = 2.0, is constant v = Jinv_00*det, is constant v = w[0][1], is constant v = 2*w[0][1], is constant v = W0[ip], is constant if constants = ['ip'] else not v = P_t0[ip][j], is constant if constants = ['j','ip'] else not""" # Get formats access = format["array access"]("") add = format["add"](["", ""]) mult = format["multiply"](["", ""]) l = access[0] r = access[1] if not variable.count(l) == variable.count(r): print("variable: ", variable) error("Something wrong with variable") # Be sure that we don't have a compound variable = expand_operations(variable, format) prods = split_expression(variable, format, add) new_prods = [] # Loop all products and variables and check if they're constant for p in prods: vrs = split_expression(p, format, mult) for v in vrs: # Check if each variable is constant, if just one fails the entire # variable is considered not to be constant const_var = False # If variable is in constants, well.... if v in constants: const_var = True continue # If we don't have any '[' or ']' we have a constant # (unless we're dealing with a call from this funtions) elif not v.count(l) and not from_is_constant: const_var = True continue # If we have an array access variable, see if the index is regarded a constant elif v.count(l): # Check if access is OK ('[' is before ']') if not v.index(l) < v.index(r): print("variable: ", v) error("Something is wrong with the array access") # Auxiliary variables index = ""; left = 0; inside = False; indices = [] # Loop all characters in variable and find indices for c in v: # If character is ']' reduce left count if c == r: left -= 1 # If the '[' count has returned to zero, we have a complete index if left == 0 and inside: const_index = False # Aux. 
var if index in constants: const_index = True try: int(index) const_index = True except: # Last resort, call recursively if is_constant(index, format, constants, True): const_index = True pass # Append index and reset values if const_index: indices.append(const_index) else: indices = [False] break index = "" inside = False # If we're inside an access, add character to index if inside: index += c # If character is '[' increase the count, and we're inside an access if c == l: inside = True left += 1 # If all indices were constant, the variable is constant if all(indices): const_var = True continue else: # If it is a float, it is also constant try: float(v) const_var = True continue except: pass # I no tests resulted in a constant variable, there is no need to continue if not const_var: return False # If all variables were constant return True return True def expand_operations(expression, format): """This function expands an expression and returns the value. E.g., ((x + y)) --> x + y 2*(x + y) --> 2*x + 2*y (x + y)*(x + y) --> x*x + y*y + 2*x*y z*(x*(y + 3) + 2) + 1 --> 1 + 2*z + x*y*z + x*z*3 z*((y + 3)*x + 2) + 1 --> 1 + 2*z + x*y*z + x*z*3""" # Get formats add = format["add"](["", ""]) mult = format["multiply"](["", ""]) group = format["grouping"]("") l = group[0] r = group[1] # Check that we have the same number of left/right parenthesis in expression if not expression.count(l) == expression.count(r): error("Number of left/right parenthesis do not match") # If we don't have any parenthesis, group variables and return if expression.count(l) == 0: return group_vars(expression, format) # Get list of additions adds = split_expression(expression, format, add) new_adds = [] # Loop additions and get products for a in adds: prods = split_expression(a, format, mult) prods.sort() new_prods = [] # FIXME: Should we use deque here? 
expanded = [] for i, p in enumerate(prods): # If we have a group, expand inner expression if p[0] == l and p[-1] == r: # Add remaining products to new products and multiply with all # terms from expanded variable expanded_var = expand_operations(p[1:-1], format) expanded.append( split_expression(expanded_var, format, add) ) # Else, just add variable to list of new products else: new_prods.append(p) if expanded: # Combine all expanded variables and multiply by factor while len(expanded) > 1: first = expanded.pop(0) second = expanded.pop(0) expanded = [[mult.join([i] + [j]) for i in first for j in second]] + expanded new_adds += [mult.join(new_prods + [e]) for e in expanded[0]] else: # Else, just multiply products and add to list of products new_adds.append( mult.join(new_prods) ) # Group variables and return return group_vars(add.join(new_adds), format) def reduce_operations(expression, format): """This function reduces the number of opertions needed to compute a given expression. It looks for the variable that appears the most and groups terms containing this variable inside parenthesis. The function is called recursively until no further reductions are possible. "x + y + x" = 2*x + y "x*x + 2.0*x*y + y*y" = y*y + (2.0*y + x)*x, not (x + y)*(x + y) as it should be!! 
z*x*y + z*x*3 + 2*z + 1" = z*(x*(y + 3) + 2) + 1""" # Get formats add = format["add"](["", ""]) mult = format["multiply"](["", ""]) group = format["grouping"]("") # Be sure that we have an expanded expression expression = expand_operations(expression, format) # Group variables to possibly reduce complexity expression = group_vars(expression, format) # Get variables and products prods, variables = get_simple_variables(expression, format) # Get the variables for which we can reduce the expression max_vars = reduction_possible(variables) new_prods = [] no_mult = [] max_vars.sort() # If we have variables that can be moved outside if max_vars: for p in prods: # Get the list of variables in current product li = split_expression(p, format, mult) li.sort() # If the list of products is the same as what we intend of moving # outside the parenthesis, leave it # (because x + x*x + x*y should be x + (x + y)*x NOT (1.0 + x + y)*x) if li == max_vars: no_mult.append(p) continue else: # Get list of all variables from max_vars that are in li indices = [i for i in max_vars if i in li] # If not all were present add to list of terms that shouldn't be # multiplied with variables and continue if indices != max_vars: no_mult.append(p) continue # Remove variables that we are moving outside for v in max_vars: li.remove(v) # Add to list of products p = mult.join(li) new_prods.append(p) # Sort lists no_mult.sort() new_prods.sort() else: # No reduction possible return expression # Recursively reduce sums with and without reduced variable new_prods = add.join(new_prods) if new_prods: new_prods = reduce_operations(new_prods, format) if no_mult: no_mult = [reduce_operations(add.join(no_mult), format)] # Group new products if we have a sum g = new_prods len_new_prods = len(split_expression(new_prods, format, add)) if len_new_prods > 1: g = format["grouping"](new_prods) # The new expression is the sum of terms that couldn't be reduced and terms # that could be reduced multiplied by the reduction 
e.g., # expr = z + (x + y)*x new_expression = add.join(no_mult + [mult.join([g, mult.join(max_vars)])]) return new_expression def get_geo_terms(expression, geo_terms, offset, format): """This function returns a new expression where all geometry terms have been substituted with geometry declarations, these declarations are added to the geo_terms dictionary. """ # Get formats add = format["add"](["", ""]) mult = format["multiply"](["", ""]) access = format["array access"]("") grouping = format["grouping"] group = grouping("") format_G = format["geometry tensor"] gl = group[0] gr = group[1] l = access[0] r = access[1] # Get the number of geometry declaration, possibly offset value num_geo = offset + len(geo_terms) new_prods = [] # Split the expression into products prods = split_expression(expression, format, add) consts = [] # Loop products and check if the variables are constant for p in prods: vrs = split_expression(p, format, mult) geos = [] # Generate geo code for constant coefficients e.g., w[0][5] new_vrs = [] for v in vrs: # If variable is a group, get the geometry terms and update geo number if v[0] == gl and v[-1] == gr: v = get_geo_terms(v[1:-1], geo_terms, offset, format) num_geo = offset + len(geo_terms) # If we still have a sum, regroup if len(v.split(add)) > 1: v = grouping(v) # Append to new variables new_vrs.append(v) # If variable is constants, add to geo terms constant = is_constant(v, format) if constant: geos.append(v) # Update variable list vrs = new_vrs; vrs.sort() # Sort geo and create geometry term geos.sort() geo = mult.join(geos) # Handle geometry term appropriately if geo: if geos != vrs: if len(geos) > 1: for g in geos: vrs.remove(g) if not geo in geo_terms: geo_terms[geo] = format_G + str(num_geo) num_geo += 1 vrs.append(geo_terms[geo]) new_prods.append(mult.join(vrs)) else: consts.append(mult.join(vrs)) else: new_prods.append(mult.join(vrs)) if consts: if len(consts) > 1: c = grouping(add.join(consts)) else: c = add.join(consts) if not c 
in geo_terms: geo_terms[c] = format_G + str(num_geo) num_geo += 1 consts = [geo_terms[c]] return add.join(new_prods + consts) def get_constants(expression, const_terms, format, constants = []): """This function returns a new expression where all geometry terms have been substituted with geometry declarations, these declarations are added to the const_terms dictionary. """ # Get formats add = format["add"](["", ""]) mult = format["multiply"](["", ""]) access = format["array access"]("") grouping = format["grouping"] group = grouping("") format_G = format["geometry tensor"] + "".join(constants) #format["geometry tensor"] gl = group[0] gr = group[1] l = access[0] r = access[1] # Get the number of geometry declaration, possibly offset value num_geo = len(const_terms) new_prods = [] # Split the expression into products prods = split_expression(expression, format, add) consts = [] # Loop products and check if the variables are constant for p in prods: vrs = split_expression(p, format, mult) geos = [] # Generate geo code for constant coefficients e.g., w[0][5] new_vrs = [] for v in vrs: # If variable is constants, add to geo terms constant = is_constant(v, format, constants) if constant: geos.append(v) # Append to new variables new_vrs.append(v) # Update variable list vrs = new_vrs; vrs.sort() # Sort geo and create geometry term geos.sort() geo = mult.join(geos) if geo: if geos != vrs: for g in geos: vrs.remove(g) if not geo in const_terms: const_terms[geo] = format_G + str(num_geo) num_geo += 1 vrs.append(const_terms[geo]) new_prods.append(mult.join(vrs)) else: consts.append(mult.join(vrs)) else: new_prods.append(mult.join(vrs)) if consts: if len(consts) > 1: c = grouping(add.join(consts)) else: c = add.join(consts) if not c in const_terms: const_terms[c] = format_G + str(num_geo) num_geo += 1 consts = [const_terms[c]] return add.join(new_prods + consts) def get_indices(variable, format, from_get_indices = False): """This function returns the indices of a given variable. 
E.g., P[0][j], returns ['j'] P[ip][k], returns ['ip','k'] P[ip][nzc0[j] + 3], returns ['ip','j'] w[0][j + 2] , returns [j]""" add = format["add"](["", ""]) mult = format["multiply"](["", ""]) format_access = format["array access"] access = format_access("") l = access[0] r = access[1] indices = [] # If there are no '[' in variable and self is the caller if not variable.count(l) and from_get_indices: adds = split_expression(variable, format, add) for a in adds: mults = split_expression(a, format, mult) for m in mults: try: float(m) except: if not m in indices: indices.append(m) else: index = ""; left = 0; inside = False; # Loop all characters in variable and find indices for c in variable: # If character is ']' reduce left count if c == r: left -= 1 # If the '[' count has returned to zero, we have a complete index if left == 0 and inside: try: eval(index) except: indices += get_indices(index, format, True) index = "" inside = False # If we're inside an access, add character to index if inside: index += c # If character is '[' increase the count, and we're inside an access if c == l: inside = True left += 1 return indices def get_variables(expression, variables, format, constants = []): """This function returns a new expression where all geometry terms have been substituted with geometry declarations, these declarations are added to the const_terms dictionary. 
""" # Get formats add = format["add"](["", ""]) mult = format["multiply"](["", ""]) format_access = format["array access"] access = format_access("") grouping = format["grouping"] group = grouping("") format_F = format["function value"] format_ip = format["integration points"] gl = group[0] gr = group[1] l = access[0] r = access[1] # If we don't have any access operators in expression, # we don't have any variables if expression.count(l) == 0: return expression # Get the number of geometry declaration, possibly offset value num_var = len(variables) new_prods = [] used_vars = [] # Split the expression into products prods = split_expression(expression, format, add) consts = [] # Loop products and check if the variables are constant for p in prods: vrs = split_expression(p, format, mult) # Variables with respect to the constants in list variables_of_interest = [] # Generate geo code for constant coefficients e.g., w[0][5] new_vrs = [] for v in vrs: # If we don't have any access operators, we don't have a variable if v.count(l) == 0: new_vrs.append(v) continue # Check if we have a variable that depends on one of the constants # First check the easy way is_var = False for c in constants: if format_access(c) in v: is_var = True break if is_var: variables_of_interest.append(v) continue # Then check the hard way # Get list of indices indices = get_indices(v, format) depends = [True for c in constants if c in indices] if any(depends): variables_of_interest.append(v) else: new_vrs.append(v) variables_of_interest.sort() variables_of_interest = mult.join(variables_of_interest) # If we have some variables, declare new variable if needed and add # to list of variables if variables_of_interest: # If we didn't already declare this variable do so if not variables_of_interest in variables: variables[variables_of_interest] = format_F + str(num_var) num_var += 1 # Get mapped variable mv = variables[variables_of_interest] new_vrs.append(mv) if not mv in used_vars: used_vars.append(mv) 
# Sort variables and add to list of products new_vrs.sort() new_prods.append(mult.join(new_vrs)) # Sort list of products and return the sum new_prods.sort() return (add.join(new_prods), used_vars) ffc-1.6.0/ffc/quadrature/sumobj.py000066400000000000000000000552751255571034100170610ustar00rootroot00000000000000"This file implements a class to represent a sum." # Copyright (C) 2009-2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . from ufl.utils.sorting import sorted_by_key # FFC modules. from ffc.log import error from ffc.cpp import format # FFC quadrature modules. from .symbolics import create_float from .symbolics import create_product from .symbolics import create_sum from .symbolics import create_fraction from .expr import Expr import six #global ind #ind = "" class Sum(Expr): __slots__ = ("vrs", "_expanded", "_reduced") def __init__(self, variables): """Initialise a Sum object, it derives from Expr and contains the additional variables: vrs - list, a list of variables. _expanded - object, an expanded object of self, e.g., self = 'x + x'-> self._expanded = 2*x (a product). _reduced - object, a reduced object of self, e.g., self = '2*x + x*y'-> self._reduced = x*(2 + y) (a product). NOTE: self._prec = 3.""" # Initialise value, list of variables, class, expanded and reduced. 
self.val = 1.0 self.vrs = [] self._prec = 3 self._expanded = False self._reduced = False # Get epsilon EPS = format["epsilon"] # Process variables if we have any. if variables: # Loop variables and remove nested Sums and collect all floats in # 1 variable. We don't collect [x, x, x] into 3*x to avoid creating # objects, instead we do this when expanding the object. float_val = 0.0 for var in variables: # Skip zero terms. if abs(var.val) < EPS: continue elif var._prec == 0: # float float_val += var.val continue elif var._prec == 3: # sum # Loop and handle variables of nested sum. for v in var.vrs: if abs(v.val) < EPS: continue elif v._prec == 0: # float float_val += v.val continue self.vrs.append(v) continue self.vrs.append(var) # Only create new float if value is different from 0. if abs(float_val) > EPS: self.vrs.append(create_float(float_val)) # If we don't have any variables the sum is zero. else: self.val = 0.0 self.vrs = [create_float(0)] # Handle zero value. if not self.vrs: self.val = 0.0 self.vrs = [create_float(0)] # Type is equal to the smallest type in both lists. self.t = min([v.t for v in self.vrs]) # Sort variables, (for representation). self.vrs.sort() # Compute the representation now, such that we can use it directly # in the __eq__ and __ne__ methods (improves performance a bit, but # only when objects are cached). self._repr = "Sum([%s])" % ", ".join([v._repr for v in self.vrs]) # Use repr as hash value. self._hash = hash(self._repr) # Print functions. def __str__(self): "Simple string representation which will appear in the generated code." # First add all the positive variables using plus, then add all # negative variables. s = format["add"]([str(v) for v in self.vrs if not v.val < 0]) +\ "".join([str(v) for v in self.vrs if v.val < 0]) # Group only if we have more that one variable. if len(self.vrs) > 1: return format["grouping"](s) return s # Binary operators. def __add__(self, other): "Addition by other objects." 
# Return a new sum return create_sum([self, other]) def __sub__(self, other): "Subtract other objects." # Return a new sum return create_sum([self, create_product([FloatValue(-1), other])]) def __mul__(self, other): "Multiplication by other objects." # If product will be zero. if self.val == 0.0 or other.val == 0.0: return create_float(0) # NOTE: We expect expanded sub-expressions with no nested operators. # Create list of new products using the '*' operator # TODO: Is this efficient? new_prods = [v*other for v in self.vrs] # Remove zero valued terms. # TODO: Can this still happen? new_prods = [v for v in new_prods if v.val != 0.0] # Create new sum. if not new_prods: return create_float(0) elif len(new_prods) > 1: # Expand sum to collect terms. return create_sum(new_prods).expand() # TODO: Is it necessary to call expand? return new_prods[0].expand() def __truediv__(self, other): "Division by other objects." # If division is illegal (this should definitely not happen). if other.val == 0.0: error("Division by zero.") # If fraction will be zero. if self.val == 0.0: return create_float(0) # NOTE: assuming that we get expanded variables. # If other is a Sum we can only return a fraction. # TODO: We could check for equal sums if Sum.__eq__ could be trusted. # As it is now (2*x + y) == (3*x + y), which works for the other things I do. # NOTE: Expect that other is expanded i.e., x + x -> 2*x which can be handled. # TODO: Fix (1 + y) / (x + x*y) -> 1 / x # Will this be handled when reducing operations on a fraction? if other._prec == 3: # sum return create_fraction(self, other) # NOTE: We expect expanded sub-expressions with no nested operators. # Create list of new products using the '*' operator. # TODO: Is this efficient? new_fracs = [v/other for v in self.vrs] # Remove zero valued terms. # TODO: Can this still happen? new_fracs = [v for v in new_fracs if v.val != 0.0] # Create new sum. 
# TODO: No need to call expand here, using the '/' operator should have # taken care of this. if not new_fracs: return create_float(0) elif len(new_fracs) > 1: return create_sum(new_fracs) return new_fracs[0] __div__ = __truediv__ # Public functions. def expand(self): "Expand all members of the sum." # If sum is already expanded, simply return the expansion. if self._expanded: return self._expanded # TODO: This function might need some optimisation. # Sort variables into symbols, products and fractions (add floats # directly to new list, will be handled later). Add fractions if # possible else add to list. new_variables = [] syms = [] prods = [] frac_groups = {} # TODO: Rather than using '+', would it be more efficient to collect # the terms first? for var in self.vrs: exp = var.expand() # TODO: Should we also group fractions, or put this in a separate function? if exp._prec in (0, 4): # float or frac new_variables.append(exp) elif exp._prec == 1: # sym syms.append(exp) elif exp._prec == 2: # prod prods.append(exp) elif exp._prec == 3: # sum for v in exp.vrs: if v._prec in (0, 4): # float or frac new_variables.append(v) elif v._prec == 1: # sym syms.append(v) elif v._prec == 2: # prod prods.append(v) # Sort all variables in groups: [2*x, -7*x], [(x + y), (2*x + 4*y)] etc. # First handle product in order to add symbols if possible. prod_groups = {} for v in prods: if v.get_vrs() in prod_groups: prod_groups[v.get_vrs()] += v else: prod_groups[v.get_vrs()] = v sym_groups = {} # Loop symbols and add to appropriate groups. for v in syms: # First try to add to a product group. if (v,) in prod_groups: prod_groups[(v,)] += v # Then to other symbols. elif v in sym_groups: sym_groups[v] += v # Create a new entry in the symbols group. else: sym_groups[v] = v # Loop groups and add to new variable list. 
for k,v in sorted_by_key(sym_groups): new_variables.append(v) for k,v in sorted_by_key(prod_groups): new_variables.append(v) # for k,v in frac_groups.iteritems(): # new_variables.append(v) # append(v) if len(new_variables) > 1: # Return new sum (will remove multiple instances of floats during construction). self._expanded = create_sum(sorted(new_variables)) return self._expanded elif new_variables: # If we just have one variable left, return it since it is already expanded. self._expanded = new_variables[0] return self._expanded error("Where did the variables go?") def get_unique_vars(self, var_type): "Get unique variables (Symbols) as a set." # Loop all variables of self update the set. var = set() for v in self.vrs: var.update(v.get_unique_vars(var_type)) return var def get_var_occurrences(self): """Determine the number of minimum number of times all variables occurs in the expression. Returns a dictionary of variables and the number of times they occur. x*x + x returns {x:1}, x + y returns {}.""" # NOTE: This function is only used if the numerator of a Fraction is a Sum. # Get occurrences in first expression. d0 = self.vrs[0].get_var_occurrences() for var in self.vrs[1:]: # Get the occurrences. d = var.get_var_occurrences() # Delete those variables in d0 that are not in d. for k, v in list(d0.items()): if not k in d: del d0[k] # Set the number of occurrences equal to the smallest number. for k, v in sorted_by_key(d): if k in d0: d0[k] = min(d0[k], v) return d0 def ops(self): "Return number of operations to compute value of sum." # Subtract one operation as it only takes n-1 ops to sum n members. op = -1 # Add the number of operations from sub-expressions. for v in self.vrs: # +1 for the +/- symbol. op += v.ops() + 1 return op def reduce_ops(self): "Reduce the number of operations needed to evaluate the sum." # global ind # ind += " " # print "\n%sreduce_ops, start" % ind if self._reduced: return self._reduced # NOTE: Assuming that sum has already been expanded. 
# TODO: Add test for this and handle case if it is not. # TODO: The entire function looks expensive, can it be optimised? # TODO: It is not necessary to create a new Sum if we do not have more # than one Fraction. # First group all fractions in the sum. new_sum = _group_fractions(self) if new_sum._prec != 3: # sum self._reduced = new_sum.reduce_ops() return self._reduced # Loop all variables of the sum and collect the number of common # variables that can be factored out. common_vars = {} for var in new_sum.vrs: # Get dictonary of occurrences and add the variable and the number # of occurrences to common dictionary. for k, v in sorted_by_key(var.get_var_occurrences()): # print # print ind + "var: ", var # print ind + "k: ", k # print ind + "v: ", v if k in common_vars: common_vars[k].append((v, var)) else: common_vars[k] = [(v, var)] # print # print "common vars: " # for k,v in common_vars.items(): # print "k: ", k # print "v: ", v # print # Determine the maximum reduction for each variable # sorted as: {(x*x*y, x*y*z, 2*y):[2, [y]]}. terms_reductions = {} for k, v in sorted_by_key(common_vars): # print # print ind + "k: ", k # print ind + "v: ", v # If the number of expressions that can be reduced is only one # there is nothing to be done. if len(v) > 1: # TODO: Is there a better way to compute the reduction gain # and the number of occurrences we should remove? # Get the list of number of occurences of 'k' in expressions # in 'v'. occurrences = [t[0] for t in v] # Determine the favorable number of occurences and an estimate # of the maximum reduction for current variable. fav_occur = 0 reduc = 0 for i in set(occurrences): # Get number of terms that has a number of occcurences equal # to or higher than the current number. num_terms = len([o for o in occurrences if o >= i]) # An estimate of the reduction in operations is: # (number_of_terms - 1) * number_occurrences. 
new_reduc = (num_terms-1)*i if new_reduc > reduc: reduc = new_reduc fav_occur = i # Extract the terms of v where the number of occurrences is # equal to or higher than the most favorable number of occurrences. terms = sorted([t[1] for t in v if t[0] >= fav_occur]) # We need to reduce the expression with the favorable number of # occurrences of the current variable. red_vars = [k]*fav_occur # If the list of terms is already present in the dictionary, # add the reduction count and the variables. if tuple(terms) in terms_reductions: terms_reductions[tuple(terms)][0] += reduc terms_reductions[tuple(terms)][1] += red_vars else: terms_reductions[tuple(terms)] = [reduc, red_vars] # print "\nterms_reductions: " # for k,v in terms_reductions.items(): # print "k: ", create_sum(k) # print "v: ", v # print "red: self: ", self if terms_reductions: # Invert dictionary of terms. reductions_terms = dict([((v[0], tuple(v[1])), k) for k, v in six.iteritems(terms_reductions)]) # Create a sorted list of those variables that give the highest # reduction. sorted_reduc_var = sorted(six.iterkeys(reductions_terms), reverse=True) # sorted_reduc_var = [k for k, v in six.iteritems(reductions_terms)] # print # print ind + "raw" # for k in sorted_reduc_var: # print ind, k[0], k[1] # sorted_reduc_var.sort() # sorted_reduc_var.sort(lambda x, y: cmp(x[0], y[0])) # sorted_reduc_var.reverse() # print ind + "sorted" # for k in sorted_reduc_var: # print ind, k[0], k[1] # Create a new dictionary of terms that should be reduced, if some # terms overlap, only pick the one which give the highest reduction to # ensure that a*x*x + b*x*x + x*x*y + 2*y -> x*x*(a + b + y) + 2*y NOT # x*x*(a + b) + y*(2 + x*x). 
reduction_vars = {} rejections = {} for var in sorted_reduc_var: terms = reductions_terms[var] if _overlap(terms, reduction_vars) or _overlap(terms, rejections): rejections[var[1]] = terms else: reduction_vars[var[1]] = terms # print "\nreduction_vars: " # for k,v in reduction_vars.items(): # print "k: ", k # print "v: ", v # Reduce each set of terms with appropriate variables. all_reduced_terms = [] reduced_expressions = [] for reduc_var, terms in sorted(six.iteritems(reduction_vars)): # Add current terms to list of all variables that have been reduced. all_reduced_terms += list(terms) # Create variable that we will use to reduce the terms. reduction_var = None if len(reduc_var) > 1: reduction_var = create_product(list(reduc_var)) else: reduction_var = reduc_var[0] # Reduce all terms that need to be reduced. reduced_terms = [t.reduce_var(reduction_var) for t in terms] # Create reduced expression. reduced_expr = None if len(reduced_terms) > 1: # Try to reduce the reduced terms further. reduced_expr = create_product([reduction_var, create_sum(reduced_terms).reduce_ops()]) else: reduced_expr = create_product(reduction_var, reduced_terms[0]) # Add reduced expression to list of reduced expressions. reduced_expressions.append(reduced_expr) # Create list of terms that should not be reduced. dont_reduce_terms = [] for v in new_sum.vrs: if not v in all_reduced_terms: dont_reduce_terms.append(v) # Create expression from terms that was not reduced. not_reduced_expr = None if dont_reduce_terms and len(dont_reduce_terms) > 1: # Try to reduce the remaining terms that were not reduced at first. not_reduced_expr = create_sum(dont_reduce_terms).reduce_ops() elif dont_reduce_terms: not_reduced_expr = dont_reduce_terms[0] # Create return expression. 
if not_reduced_expr: self._reduced = create_sum(reduced_expressions + [not_reduced_expr]) elif len(reduced_expressions) > 1: self._reduced = create_sum(reduced_expressions) else: self._reduced = reduced_expressions[0] # # NOTE: Only switch on for debugging. # if not self._reduced.expand() == self.expand(): # print reduced_expressions[0] # print reduced_expressions[0].expand() # print "self: ", self # print "red: ", repr(self._reduced) # print "self.exp: ", self.expand() # print "red.exp: ", self._reduced.expand() # error("Reduced expression is not equal to original expression.") return self._reduced # Return self if we don't have any variables for which we can reduce # the sum. self._reduced = self return self._reduced def reduce_vartype(self, var_type): """Reduce expression with given var_type. It returns a list of tuples [(found, remain)], where 'found' is an expression that only has variables of type == var_type. If no variables are found, found=(). The 'remain' part contains the leftover after division by 'found' such that: self = Sum([f*r for f,r in self.reduce_vartype(Type)]).""" found = {} # print "\nself: ", self # Loop members and reduce them by vartype. for v in self.vrs: # print "v: ", v # print "red: ", v.reduce_vartype(var_type) # red = v.reduce_vartype(var_type) # f, r = v.reduce_vartype(var_type) # print "len red: ", len(red) # print "red: ", red # if len(red) == 2: # f, r = red # else: # raise RuntimeError for f, r in v.reduce_vartype(var_type): if f in found: found[f].append(r) else: found[f] = [r] # Create the return value. returns = [] for f, r in sorted_by_key(found): if len(r) > 1: # Use expand to group expressions. # r = create_sum(r).expand() r = create_sum(r) elif r: r = r.pop() returns.append((f, r)) return sorted(returns) def _overlap(l, d): "Check if a member in list l is in the value (list) of dictionary d." 
for m in l: for k, v in sorted_by_key(d): if m in v: return True return False def _group_fractions(expr): "Group Fractions in a Sum: 2/x + y/x -> (2 + y)/x." if expr._prec != 3: # sum return expr # Loop variables and group those with common denominator. not_frac = [] fracs = {} for v in expr.vrs: if v._prec == 4: # frac if v.denom in fracs: fracs[v.denom][1].append(v.num) fracs[v.denom][0] += 1 else: fracs[v.denom] = [1, [v.num], v] continue not_frac.append(v) if not fracs: return expr # Loop all fractions and create new ones using an appropriate numerator. for k, v in sorted(six.iteritems(fracs)): if v[0] > 1: # TODO: Is it possible to avoid expanding the Sum? # I think we have to because x/a + 2*x/a -> 3*x/a. not_frac.append(create_fraction(create_sum(v[1]).expand(), k)) else: not_frac.append(v[2]) # Create return value. if len(not_frac) > 1: return create_sum(not_frac) return not_frac[0] from .floatvalue import FloatValue from .symbol import Symbol from .product import Product from .fraction import Fraction ffc-1.6.0/ffc/quadrature/symbol.py000066400000000000000000000206071255571034100170560ustar00rootroot00000000000000"This file implements a class to represent a symbol." # Copyright (C) 2009-2011 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2009-07-12 # Last changed: 2011-06-28 # FFC modules. 
from ffc.log import error from ffc.cpp import format # FFC quadrature modules. from .symbolics import type_to_string from .symbolics import create_float from .symbolics import create_product from .symbolics import create_sum from .symbolics import create_fraction from .expr import Expr class Symbol(Expr): __slots__ = ("v", "base_expr", "base_op", "exp", "cond") def __init__(self, variable, symbol_type, base_expr=None, base_op=0): """Initialise a Symbols object, it derives from Expr and contains the additional variables: v - string, variable name base_expr - Other expression type like 'x*y + z' base_op - number of operations for the symbol itself if it's a math operation like std::cos(.) -> base_op = 1. NOTE: self._prec = 1.""" # Dummy value, a symbol is always one. self.val = 1.0 # Initialise variable, type and class. self.v = variable self.t = symbol_type self._prec = 1 # Needed for symbols like std::cos(x*y + z), # where base_expr = x*y + z. # ops = base_expr.ops() + base_ops = 2 + 1 = 3 self.base_expr = base_expr self.base_op = base_op # If type of the base_expr is lower than the given symbol_type change type. # TODO: Should we raise an error here? Or simply require that one # initalise the symbol by Symbol('std::cos(x*y)', (x*y).t, x*y, 1). if base_expr and base_expr.t < self.t: self.t = base_expr.t # Compute the representation now, such that we can use it directly # in the __eq__ and __ne__ methods (improves performance a bit, but # only when objects are cached). if self.base_expr:# and self.exp is None: self._repr = "Symbol('%s', %s, %s, %d)" % (self.v, type_to_string[self.t],\ self.base_expr._repr, self.base_op) else: self._repr = "Symbol('%s', %s)" % (self.v, type_to_string[self.t]) # Use repr as hash value. self._hash = hash(self._repr) # Print functions. def __str__(self): "Simple string representation which will appear in the generated code." # print "sym str: ", self.v return self.v # Binary operators. 
def __add__(self, other): "Addition by other objects." # NOTE: We expect expanded objects # symbols, if other is a product, try to let product handle the addition. # Returns x + x -> 2*x, x + 2*x -> 3*x. if self._repr == other._repr: return create_product([create_float(2), self]) elif other._prec == 2: # prod return other.__add__(self) return create_sum([self, other]) def __sub__(self, other): "Subtract other objects." # NOTE: We expect expanded objects # symbols, if other is a product, try to let product handle the addition. if self._repr == other._repr: return create_float(0) elif other._prec == 2: # prod if other.get_vrs() == (self,): return create_product([create_float(1.0 - other.val), self]).expand() return create_sum([self, create_product([create_float(-1), other])]) def __mul__(self, other): "Multiplication by other objects." # NOTE: We assume expanded objects. # If product will be zero. if self.val == 0.0 or other.val == 0.0: return create_float(0) # If other is Sum or Fraction let them handle the multiply. if other._prec in (3, 4): # sum or frac return other.__mul__(self) # If other is a float or symbol, create simple product. if other._prec in (0, 1): # float or sym return create_product([self, other]) # Else add variables from product. return create_product([self] + other.vrs) def __truediv__(self, other): "Division by other objects." # NOTE: We assume expanded objects. # If division is illegal (this should definitely not happen). if other.val == 0.0: error("Division by zero.") # Return 1 if the two symbols are equal. if self._repr == other._repr: return create_float(1) # If other is a Sum we can only return a fraction. # TODO: Refine this later such that x / (x + x*y) -> 1 / (1 + y)? if other._prec == 3: # sum return create_fraction(self, other) # Handle division by FloatValue, Symbol, Product and Fraction. # Create numerator and list for denominator. num = [self] denom = [] # Add floatvalue, symbol and products to the list of denominators. 
if other._prec in (0, 1): # float or sym denom = [other] elif other._prec == 2: # prod # Need copies, so can't just do denom = other.vrs. denom += other.vrs # fraction. else: # TODO: Should we also support division by fraction for generality? # It should not be needed by this module. error("Did not expected to divide by fraction.") # Remove one instance of self in numerator and denominator if # present in denominator i.e., x/(x*y) --> 1/y. if self in denom: denom.remove(self) num.remove(self) # Loop entries in denominator and move float value to numerator. for d in denom: # Add the inverse of a float to the numerator, remove it from # the denominator and continue. if d._prec == 0: # float num.append(create_float(1.0/other.val)) denom.remove(d) continue # Create appropriate return value depending on remaining data. # Can only be for x / (2*y*z) -> 0.5*x / (y*z). if len(num) > 1: num = create_product(num) # x / (y*z) -> x/(y*z), elif num: num = num[0] # else x / (x*y) -> 1/y. else: num = create_float(1) # If we have a long denominator, create product and fraction. if len(denom) > 1: return create_fraction(num, create_product(denom)) # If we do have a denominator, but only one variable don't create a # product, just return a fraction using the variable as denominator. elif denom: return create_fraction(num, denom[0]) # If we don't have any donominator left, return the numerator. # x / 2.0 -> 0.5*x. return num.expand() __div__ = __truediv__ # Public functions. def get_unique_vars(self, var_type): "Get unique variables (Symbols) as a set." # Return self if type matches, also return base expression variables. s = set() if self.t == var_type: s.add(self) if self.base_expr: s.update(self.base_expr.get_unique_vars(var_type)) return s def get_var_occurrences(self): """Determine the number of times all variables occurs in the expression. Returns a dictionary of variables and the number of times they occur.""" # There is only one symbol. 
return {self:1} def ops(self): "Returning the number of floating point operation for symbol." # Get base ops, typically 1 for sin() and then add the operations # for the base (sin(2*x + 1)) --> 2 + 1. if self.base_expr: return self.base_op + self.base_expr.ops() return self.base_op from .floatvalue import FloatValue from .product import Product from .sumobj import Sum from .fraction import Fraction ffc-1.6.0/ffc/quadrature/symbolics.py000066400000000000000000000277271255571034100175670ustar00rootroot00000000000000"This file contains functions to optimise the code generated for quadrature representation." # Copyright (C) 2009-2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . from ufl.utils.sorting import sorted_by_key # FFC modules from ffc.log import debug, error from ffc.cpp import format # TODO: Use proper errors, not just RuntimeError. # TODO: Change all if value == 0.0 to something more safe. # Some basic variables. BASIS = 0 IP = 1 GEO = 2 CONST = 3 type_to_string = {BASIS:"BASIS", IP:"IP",GEO:"GEO", CONST:"CONST"} # Functions and dictionaries for cache implementation. # Increases speed and should also reduce memory consumption. 
_float_cache = {} def create_float(val): if val in _float_cache: # print "found %f in cache" %val return _float_cache[val] float_val = FloatValue(val) _float_cache[val] = float_val return float_val _symbol_cache = {} def create_symbol(variable, symbol_type, base_expr=None, base_op=0): key = (variable, symbol_type, base_expr, base_op) if key in _symbol_cache: # print "found %s in cache" %variable return _symbol_cache[key] symbol = Symbol(variable, symbol_type, base_expr, base_op) _symbol_cache[key] = symbol return symbol _product_cache = {} def create_product(variables): # NOTE: If I switch on the sorted line, it might be possible to find more # variables in the cache, but it adds some overhead so I don't think it # pays off. The member variables are also sorted in the classes # (Product and Sum) so the list 'variables' is probably already sorted. # key = tuple(sorted(variables)) key = tuple(variables) if key in _product_cache: # print "found %s in cache" %str(key) # print "found product in cache" return _product_cache[key] product = Product(key) _product_cache[key] = product return product _sum_cache = {} def create_sum(variables): # NOTE: If I switch on the sorted line, it might be possible to find more # variables in the cache, but it adds some overhead so I don't think it # pays off. The member variables are also sorted in the classes # (Product and Sum) so the list 'variables' is probably already sorted. 
# key = tuple(sorted(variables)) key = tuple(variables) if key in _sum_cache: # print "found %s in cache" %str(key) # print "found sum in cache" return _sum_cache[key] s = Sum(key) _sum_cache[key] = s return s _fraction_cache = {} def create_fraction(num, denom): key = (num, denom) if key in _fraction_cache: # print "found %s in cache" %str(key) # print "found fraction in cache" return _fraction_cache[key] fraction = Fraction(num, denom) _fraction_cache[key] = fraction return fraction # NOTE: We use commented print for debug, since debug will make the code run slower. def generate_aux_constants(constant_decl, name, var_type, print_ops=False): "A helper tool to generate code for constant declarations." format_comment = format["comment"] code = [] append = code.append ops = 0 for num, expr in sorted((v, k) for k, v in sorted_by_key(constant_decl)): # debug("expr orig: " + str(expr)) # print "\nnum: ", num # print "expr orig: " + repr(expr) # print "expr exp: " + str(expr.expand()) # Expand and reduce expression (If we don't already get reduced expressions.) expr = expr.expand().reduce_ops() # debug("expr opt: " + str(expr)) # print "expr opt: " + str(expr) if print_ops: op = expr.ops() ops += op append(format_comment("Number of operations: %d" %op)) append(var_type(name(num), str(expr))) append("") else: ops += expr.ops() append(var_type(name(num), str(expr))) return (ops, code) # NOTE: We use commented print for debug, since debug will make the code run slower. def optimise_code(expr, ip_consts, geo_consts, trans_set): """Optimise a given expression with respect to, basis functions, integration points variables and geometric constants. 
The function will update the dictionaries ip_const and geo_consts with new declarations and update the trans_set (used transformations).""" # print "expr: ", repr(expr) format_G = format["geometry constant"] # format_ip = format["integration points"] format_I = format["ip constant"] trans_set_update = trans_set.update # Return constant symbol if expanded value is zero. exp_expr = expr.expand() if exp_expr.val == 0.0: return create_float(0) # Reduce expression with respect to basis function variable. basis_expressions = exp_expr.reduce_vartype(BASIS) # If we had a product instance we'll get a tuple back so embed in list. if not isinstance(basis_expressions, list): basis_expressions = [basis_expressions] basis_vals = [] # Process each instance of basis functions. for basis, ip_expr in basis_expressions: # Get the basis and the ip expression. # debug("\nbasis\n" + str(basis)) # debug("ip_epxr\n" + str(ip_expr)) # print "\nbasis\n" + str(basis) # print "ip_epxr\n" + str(ip_expr) # print "ip_epxr\n" + repr(ip_expr) # print "ip_epxr\n" + repr(ip_expr.expand()) # If we have no basis (like functionals) create a const. if not basis: basis = create_float(1) # NOTE: Useful for debugging to check that terms where properly reduced. # if Product([basis, ip_expr]).expand() != expr.expand(): # prod = Product([basis, ip_expr]).expand() # print "prod == sum: ", isinstance(prod, Sum) # print "expr == sum: ", isinstance(expr, Sum) # print "prod.vrs: ", prod.vrs # print "expr.vrs: ", expr.vrs # print "expr.vrs = prod.vrs: ", expr.vrs == prod.vrs # print "equal: ", prod == expr # print "\nprod: ", prod # print "\nexpr: ", expr # print "\nbasis: ", basis # print "\nip_expr: ", ip_expr # error("Not equal") # If the ip expression doesn't contain any operations skip remainder. 
# if not ip_expr: if not ip_expr or ip_expr.val == 0.0: basis_vals.append(basis) continue if not ip_expr.ops() > 0: basis_vals.append(create_product([basis, ip_expr])) continue # Reduce the ip expressions with respect to IP variables. ip_expressions = ip_expr.expand().reduce_vartype(IP) # If we had a product instance we'll get a tuple back so embed in list. if not isinstance(ip_expressions, list): ip_expressions = [ip_expressions] # # Debug code to check that reduction didn't screw up anything # for ip in ip_expressions: # ip_dec, geo = ip # print "geo: ", geo # print "ip_dec: ", ip_dec # vals = [] # for ip in ip_expressions: # ip_dec, geo = ip # if ip_dec and geo: # vals.append(Product([ip_dec, geo])) # elif geo: # vals.append(geo) # elif ip_dec: # vals.append(ip_dec) # if Sum(vals).expand() != ip_expr.expand(): ## if Sum([Product([ip, geo]) for ip, geo in ip_expressions]).expand() != ip_expr.expand(): # print "\nip_expr: ", repr(ip_expr) ## print "\nip_expr: ", str(ip_expr) ## print "\nip_dec: ", repr(ip_dec) ## print "\ngeo: ", repr(geo) # for ip in ip_expressions: # ip_dec, geo = ip # print "geo: ", geo # print "ip_dec: ", ip_dec # error("Not equal") ip_vals = [] # Loop ip expressions. for ip in sorted(ip_expressions): ip_dec, geo = ip # debug("\nip_dec: " + str(ip_dec)) # debug("\ngeo: " + str(geo)) # print "\nip_dec: " + repr(ip_dec) # print "\ngeo: " + repr(geo) # print "exp: ", geo.expand() # print "val: ", geo.expand().val # print "repx: ", repr(geo.expand()) # NOTE: Useful for debugging to check that terms where properly reduced. # if Product([ip_dec, geo]).expand() != ip_expr.expand(): # print "\nip_expr: ", repr(ip_expr) # print "\nip_dec: ", repr(ip_dec) # print "\ngeo: ", repr(geo) # error("Not equal") # Update transformation set with those values that might be embedded in IP terms. # if ip_dec: if ip_dec and ip_dec.val != 0.0: trans_set_update([str(x) for x in ip_dec.get_unique_vars(GEO)]) # Append and continue if we did not have any geo values. 
# if not geo: if not geo or geo.val == 0.0: if ip_dec and ip_dec.val != 0.0: ip_vals.append(ip_dec) continue # Update the transformation set with the variables in the geo term. trans_set_update([str(x) for x in geo.get_unique_vars(GEO)]) # Only declare auxiliary geo terms if we can save operations. # geo = geo.expand().reduce_ops() if geo.ops() > 0: # debug("geo: " + str(geo)) # print "geo: " + str(geo) # If the geo term is not in the dictionary append it. # if not geo in geo_consts: if not geo in geo_consts: geo_consts[geo] = len(geo_consts) # Substitute geometry expression. geo = create_symbol(format_G(geo_consts[geo]), GEO) # If we did not have any ip_declarations use geo, else create a # product and append to the list of ip_values. # if not ip_dec: if not ip_dec or ip_dec.val == 0.0: ip_dec = geo else: ip_dec = create_product([ip_dec, geo]) ip_vals.append(ip_dec) # Create sum of ip expressions to multiply by basis. if len(ip_vals) > 1: ip_expr = create_sum(ip_vals) elif ip_vals: ip_expr = ip_vals.pop() # If we can save operations by declaring it as a constant do so, if it # is not in IP dictionary, add it and use new name. # ip_expr = ip_expr.expand().reduce_ops() # if ip_expr.ops() > 0: if ip_expr.ops() > 0 and ip_expr.val != 0.0: # if not ip_expr in ip_consts: if not ip_expr in ip_consts: ip_consts[ip_expr] = len(ip_consts) # Substitute ip expression. # ip_expr = create_symbol(format_G + format_ip + str(ip_consts[ip_expr]), IP) ip_expr = create_symbol(format_I(ip_consts[ip_expr]), IP) # Multiply by basis and append to basis vals. # prod = create_product([basis, ip_expr]) # if prod.expand().val != 0.0: # basis_vals.append(prod) basis_vals.append(create_product([basis, ip_expr])) # Return (possible) sum of basis values. if len(basis_vals) > 1: return create_sum(basis_vals) elif basis_vals: return basis_vals[0] # Where did the values go? 
error("Values disappeared.") from .floatvalue import FloatValue from .symbol import Symbol from .product import Product from .sumobj import Sum from .fraction import Fraction ffc-1.6.0/ffc/quadrature/tabulate_basis.py000066400000000000000000000324211255571034100205300ustar00rootroot00000000000000"Quadrature representation class for UFL" # Copyright (C) 2009-2014 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Anders Logg, 2009. 
# Modified by Martin Alnaes, 2013-2014 import numpy, itertools # UFL modules import ufl from ufl.cell import Cell from ufl.classes import Grad, CellAvg, FacetAvg from ufl.algorithms import extract_unique_elements, extract_type, extract_elements # FFC modules from ffc.log import ffc_assert, info, error, warning from ffc.utils import product from ffc.fiatinterface import create_element from ffc.fiatinterface import map_facet_points, reference_cell_vertices from ffc.quadrature_schemes import create_quadrature def _create_quadrature_points_and_weights(integral_type, cellname, facet_cellname, degree, rule): if integral_type == "cell": (points, weights) = create_quadrature(cellname, degree, rule) elif integral_type == "exterior_facet" or integral_type == "interior_facet": (points, weights) = create_quadrature(facet_cellname, degree, rule) elif integral_type == "vertex": (points, weights) = ([()], numpy.array([1.0,])) # TODO: Will be fixed elif integral_type == "custom": (points, weights) = (None, None) else: error("Unknown integral type: " + str(integral_type)) return (points, weights) def _find_element_derivatives(expr, elements, element_replace_map): "Find the highest derivatives of given elements in expression." # TODO: This is most likely not the best way to get the highest # derivative of an element, but it works! # Initialise dictionary of elements and the number of derivatives. # (Note that elements are already mapped through the element_replace_map) num_derivatives = dict((e, 0) for e in elements) # Extract the derivatives from the integral. derivatives = set(extract_type(expr, Grad)) # Loop derivatives and extract multiple derivatives. for d in list(derivatives): # After UFL has evaluated derivatives, only one element # can be found inside any single Grad expression elem, = extract_elements(d.ufl_operands[0]) elem = element_replace_map[elem] # Set the number of derivatives to the highest value encountered so far. 
num_derivatives[elem] = max(num_derivatives[elem], len(extract_type(d, Grad))) return num_derivatives def domain_to_entity_dim(integral_type, tdim): if integral_type == "cell": entity_dim = tdim elif (integral_type == "exterior_facet" or integral_type == "interior_facet"): entity_dim = tdim - 1 elif integral_type == "vertex": entity_dim = 0 elif integral_type == "custom": entity_dim = tdim else: error("Unknown integral_type: %s" % integral_type) return entity_dim def _map_entity_points(cellname, tdim, points, entity_dim, entity): # Not sure if this is useful anywhere else than in _tabulate_psi_table! if entity_dim == tdim: return points elif entity_dim == tdim-1: return map_facet_points(points, entity) elif entity_dim == 0: return (reference_cell_vertices(cellname)[entity],) def _tabulate_empty_psi_table(tdim, deriv_order, element): "Tabulate psi table when there are no points" # All combinations of partial derivatives up to given order gdim = tdim # hack, consider passing gdim variable here derivs = [d for d in itertools.product(*(gdim*[list(range(0, deriv_order + 1))]))] derivs = [d for d in derivs if sum(d) <= deriv_order] # Return empty table table = {} for d in derivs: value_shape = element.value_shape() if value_shape == (): table[d] = [[]] else: value_size = product(value_shape) table[d] = [[[] for c in range(value_size)]] return {None: table} def _tabulate_psi_table(integral_type, cellname, tdim, element, deriv_order, points): "Tabulate psi table for different integral types." # MSA: I attempted to generalize this function, could this way of # handling domain types generically extend to other parts of the code? 
# Handle case when list of points is empty if points is None: return _tabulate_empty_psi_table(tdim, deriv_order, element) # Otherwise, call FIAT to tabulate entity_dim = domain_to_entity_dim(integral_type, tdim) num_entities = Cell(cellname).num_entities(entity_dim) psi_table = {} for entity in range(num_entities): entity_points = _map_entity_points(cellname, tdim, points, entity_dim, entity) # TODO: Use 0 as key for cell and we may be able to generalize other places: key = None if integral_type == "cell" else entity psi_table[key] = element.tabulate(deriv_order, entity_points) return psi_table def _tabulate_entities(integral_type, cellname, tdim): "Tabulate psi table for different integral types." # MSA: I attempted to generalize this function, could this way of # handling domain types generically extend to other parts of the code? entity_dim = domain_to_entity_dim(integral_type, tdim) num_entities = Cell(cellname).num_entities(entity_dim) entities = set() for entity in range(num_entities): # TODO: Use 0 as key for cell and we may be able to generalize other places: key = None if integral_type == "cell" else entity entities.add(key) return entities def insert_nested_dict(root, keys, value): for k in keys[:-1]: d = root.get(k) if d is None: d = {} root[k] = d root = d root[keys[-1]] = value # MSA: This function is in serious need for some refactoring and splitting up. # Or perhaps I should just add a new implementation for uflacs, # but I'd rather not have two versions to maintain. def tabulate_basis(sorted_integrals, form_data, itg_data): "Tabulate the basisfunctions and derivatives." # MER: Note to newbies: this code assumes that each integral in # the dictionary of sorted_integrals that enters here, has a # unique number of quadrature points ... # Initialise return values. 
quadrature_rules = {} psi_tables = {} integrals = {} avg_elements = { "cell": [], "facet": [] } integral_type = itg_data.integral_type cellname = itg_data.domain.cell().cellname() facet_cellname = itg_data.domain.cell().facet_cellname() tdim = itg_data.domain.topological_dimension() # Loop the quadrature points and tabulate the basis values. rules = sorted(sorted_integrals.keys()) for degree, scheme in rules: # --------- Creating quadrature rule # Make quadrature rule and get points and weights. (points, weights) = _create_quadrature_points_and_weights(integral_type, cellname, facet_cellname, degree, scheme) # The TOTAL number of weights/points len_weights = None if weights is None else len(weights) # Add points and rules to dictionary ffc_assert(len_weights not in quadrature_rules, "This number of points is already present in the weight table: " + repr(quadrature_rules)) quadrature_rules[len_weights] = (weights, points) # --------- Store integral # Add the integral with the number of points as a key to the return integrals. integral = sorted_integrals[(degree, scheme)] ffc_assert(len_weights not in integrals, \ "This number of points is already present in the integrals: " + repr(integrals)) integrals[len_weights] = integral # --------- Analyse UFL elements in integral # Get all unique elements in integral. ufl_elements = [form_data.element_replace_map[e] for e in extract_unique_elements(integral)] # Insert elements for x and J domain = integral.domain() # FIXME: For all domains to be sure? Better to rewrite though. 
x_element = domain.coordinate_element() if x_element not in ufl_elements: if integral_type == "custom": # FIXME: Not yet implemented, in progress warning("Vector elements not yet supported in custom integrals so element for coordinate function x will not be generated.") else: ufl_elements.append(x_element) # Find all CellAvg and FacetAvg in integrals and extract elements for avg, AvgType in (("cell", CellAvg), ("facet", FacetAvg)): expressions = extract_type(integral, AvgType) avg_elements[avg] = [form_data.element_replace_map[e] for expr in expressions for e in extract_unique_elements(expr)] # Find the highest number of derivatives needed for each element num_derivatives = _find_element_derivatives(integral.integrand(), ufl_elements, form_data.element_replace_map) # Need at least 1 for the Jacobian num_derivatives[x_element] = max(num_derivatives.get(x_element,0), 1) # --------- Evaluate FIAT elements in quadrature points and store in tables # Add the number of points to the psi tables dictionary. ffc_assert(len_weights not in psi_tables, \ "This number of points is already present in the psi table: " + repr(psi_tables)) psi_tables[len_weights] = {} # Loop FIAT elements and tabulate basis as usual. for ufl_element in ufl_elements: fiat_element = create_element(ufl_element) # Tabulate table of basis functions and derivatives in points psi_table = _tabulate_psi_table(integral_type, cellname, tdim, fiat_element, num_derivatives[ufl_element], points) # Insert table into dictionary based on UFL elements. (None=not averaged) psi_tables[len_weights][ufl_element] = { None: psi_table } # Loop over elements found in CellAvg and tabulate basis averages len_weights = 1 for avg in ("cell", "facet"): # Doesn't matter if it's exterior or interior if avg == "cell": avg_integral_type = "cell" elif avg == "facet": avg_integral_type = "exterior_facet" for element in avg_elements[avg]: fiat_element = create_element(element) # Make quadrature rule and get points and weights. 
(points, weights) = _create_quadrature_points_and_weights(avg_integral_type, cellname, facet_cellname, element.degree(), "default") wsum = sum(weights) # Tabulate table of basis functions and derivatives in points entity_psi_tables = _tabulate_psi_table(avg_integral_type, cellname, tdim, fiat_element, 0, points) rank = len(element.value_shape()) # Hack, duplicating table with per-cell values for each facet in the case of cell_avg(f) in a facet integral actual_entities = _tabulate_entities(integral_type, cellname, tdim) if len(actual_entities) > len(entity_psi_tables): assert len(entity_psi_tables) == 1 assert avg_integral_type == "cell" assert "facet" in integral_type v, = sorted(entity_psi_tables.values()) entity_psi_tables = dict((e, v) for e in actual_entities) for entity, deriv_table in sorted(entity_psi_tables.items()): deriv, = sorted(deriv_table.keys()) # Not expecting derivatives of averages psi_table = deriv_table[deriv] if rank: # Compute numeric integral num_dofs, num_components, num_points = psi_table.shape ffc_assert(num_points == len(weights), "Weights and table shape does not match.") avg_psi_table = numpy.asarray([[[numpy.dot(psi_table[j,k,:], weights) / wsum] for k in range(num_components)] for j in range(num_dofs)]) else: # Compute numeric integral num_dofs, num_points = psi_table.shape ffc_assert(num_points == len(weights), "Weights and table shape does not match.") avg_psi_table = numpy.asarray([[numpy.dot(psi_table[j,:], weights) / wsum] for j in range(num_dofs)]) # Insert table into dictionary based on UFL elements. insert_nested_dict(psi_tables, (len_weights, element, avg, entity, deriv), avg_psi_table) return (integrals, psi_tables, quadrature_rules) ffc-1.6.0/ffc/quadrature_schemes.py000066400000000000000000000346241255571034100172640ustar00rootroot00000000000000"""Quadrature schemes on cells This module generates quadrature schemes on reference cells that integrate exactly a polynomial of a given degree using a specified scheme. 
The UFC definition of a reference cell is used. Scheme options are: scheme="default" scheme="canonical" (collapsed Gauss scheme supplied by FIAT) Background on the schemes: Keast rules for tetrahedra: Keast, P. Moderate-degree tetrahedral quadrature formulas, Computer Methods in Applied Mechanics and Engineering 55(3):339-348, 1986. http://dx.doi.org/10.1016/0045-7825(86)90059-9 """ # Copyright (C) 2011 Garth N. Wells # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2011-04-19 # Last changed: 2011-04-19 # NumPy from numpy import array, arange, float64 # UFL import ufl # FFC modules from ffc.log import debug, error from ffc.fiatinterface import reference_cell from ffc.fiatinterface import create_quadrature as fiat_create_quadrature # Dictionary mapping from cellname to dimension from ufl.cell import cellname2dim def create_quadrature(shape, degree, scheme="default"): """ Generate quadrature rule (points, weights) for given shape that will integrate an polynomial of order 'degree' exactly. """ # FIXME: KBO: Can this be handled more elegantly? 
# Handle point case if isinstance(shape, int) and shape == 0 or cellname2dim[shape] == 0: return ([()], array([1.0,])) if scheme == "default": if shape == "tetrahedron": return _tetrahedron_scheme(degree) elif shape == "triangle": return _triangle_scheme(degree) else: return _fiat_scheme(shape, degree) elif scheme == "vertex": # The vertex scheme, i.e., averaging the function value in the vertices # and multiplying with the simplex volume, is only of order 1 and # inferior to other generic schemes in terms of error reduction. # Equation systems generated with the vertex scheme have some # properties that other schemes lack, e.g., the mass matrix is # a simple diagonal matrix. This may be prescribed in certain cases. # if degree > 1: from warnings import warn warn(("Explicitly selected vertex quadrature (degree 1), " +"but requested degree is %d.") % degree) if shape == "tetrahedron": return (array([ [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0] ]), array([1.0/24.0, 1.0/24.0, 1.0/24.0, 1.0/24.0]) ) elif shape == "triangle": return (array([ [0.0, 0.0], [1.0, 0.0], [0.0, 1.0] ]), array([1.0/6.0, 1.0/6.0, 1.0/6.0]) ) else: # Trapezoidal rule. return (array([ [0.0, 0.0], [0.0, 1.0] ]), array([1.0/2.0, 1.0/2.0]) ) elif scheme == "canonical": return _fiat_scheme(shape, degree) else: error("Unknown quadrature scheme: %s." % scheme) def _fiat_scheme(shape, degree): """Get quadrature scheme from FIAT interface""" # Number of points per axis for exact integration num_points_per_axis = (degree + 1 + 1) // 2 # Create and return FIAT quadrature rulet return fiat_create_quadrature(shape, num_points_per_axis) def _triangle_scheme(degree): """Return a quadrature scheme on a triangle of specified order. 
Falls back on canonical rule for higher orders.""" if degree == 0 or degree == 1: # Scheme from Zienkiewicz and Taylor, 1 point, degree of precision 1 x = array([ [1.0/3.0, 1.0/3.0] ]) w = array([0.5]) elif degree == 2: # Scheme from Strang and Fix, 3 points, degree of precision 2 x = array([ [1.0/6.0, 1.0/6.0], [1.0/6.0, 2.0/3.0], [2.0/3.0, 1.0/6.0] ]) w = arange(3, dtype=float64) w[:] = 1.0/6.0 elif degree == 3: # Scheme from Strang and Fix, 6 points, degree of precision 3 x = array([ [0.659027622374092, 0.231933368553031], [0.659027622374092, 0.109039009072877], [0.231933368553031, 0.659027622374092], [0.231933368553031, 0.109039009072877], [0.109039009072877, 0.659027622374092], [0.109039009072877, 0.231933368553031] ]) w = arange(6, dtype=float64) w[:] = 1.0/12.0 elif degree == 4: # Scheme from Strang and Fix, 6 points, degree of precision 4 x = array([ [0.816847572980459, 0.091576213509771], [0.091576213509771, 0.816847572980459], [0.091576213509771, 0.091576213509771], [0.108103018168070, 0.445948490915965], [0.445948490915965, 0.108103018168070], [0.445948490915965, 0.445948490915965] ]) w = arange(6, dtype=float64) w[0:3] = 0.109951743655322 w[3:6] = 0.223381589678011 w = w/2.0 elif degree == 5: # Scheme from Strang and Fix, 7 points, degree of precision 5 x = array([ [0.33333333333333333, 0.33333333333333333], [0.79742698535308720, 0.10128650732345633], [0.10128650732345633, 0.79742698535308720], [0.10128650732345633, 0.10128650732345633], [0.05971587178976981, 0.47014206410511505], [0.47014206410511505, 0.05971587178976981], [0.47014206410511505, 0.47014206410511505] ]) w = arange(7, dtype=float64) w[0] = 0.22500000000000000 w[1:4] = 0.12593918054482717 w[4:7] = 0.13239415278850616 w = w/2.0 elif degree == 6: # Scheme from Strang and Fix, 12 points, degree of precision 6 x = array([ [0.873821971016996, 0.063089014491502], [0.063089014491502, 0.873821971016996], [0.063089014491502, 0.063089014491502], [0.501426509658179, 0.249286745170910], 
[0.249286745170910, 0.501426509658179], [0.249286745170910, 0.249286745170910], [0.636502499121399, 0.310352451033785], [0.636502499121399, 0.053145049844816], [0.310352451033785, 0.636502499121399], [0.310352451033785, 0.053145049844816], [0.053145049844816, 0.636502499121399], [0.053145049844816, 0.310352451033785] ]) w = arange(12, dtype=float64) w[0:3] = 0.050844906370207 w[3:6] = 0.116786275726379 w[6:12] = 0.082851075618374 w = w/2.0 else: # Get canonical scheme x, w = _fiat_scheme("triangle", degree) # Return scheme return x, w def _tetrahedron_scheme(degree): """Return a quadrature scheme on a tetrahedron of specified degree. Falls back on canonical rule for higher orders""" if degree == 0 or degree == 1: # Scheme from Zienkiewicz and Taylor, 1 point, degree of precision 1 x = array([ [1.0/4.0, 1.0/4.0, 1.0/4.0] ]) w = array([1.0/6.0]) elif degree == 2: # Scheme from Zienkiewicz and Taylor, 4 points, degree of precision 2 a, b = 0.585410196624969, 0.138196601125011 x = array([ [a, b, b], [b, a, b], [b, b, a], [b, b, b] ]) w = arange(4, dtype=float64) w[:] = 1.0/24.0 elif degree == 3: # Scheme from Zienkiewicz and Taylor, 5 points, degree of precision 3 # Note: this scheme has a negative weight x = array([ [0.2500000000000000, 0.2500000000000000, 0.2500000000000000], [0.5000000000000000, 0.1666666666666666, 0.1666666666666666], [0.1666666666666666, 0.5000000000000000, 0.1666666666666666], [0.1666666666666666, 0.1666666666666666, 0.5000000000000000], [0.1666666666666666, 0.1666666666666666, 0.1666666666666666] ]) w = arange(5, dtype=float64) w[0] = -0.8 w[1:5] = 0.45 w = w/6.0 elif degree == 4: # Keast rule, 14 points, degree of precision 4 # Values taken from http://people.sc.fsu.edu/~jburkardt/datasets/quadrature_rules_tet/quadrature_rules_tet.html # (KEAST5) x = array([ [0.0000000000000000, 0.5000000000000000, 0.5000000000000000], [0.5000000000000000, 0.0000000000000000, 0.5000000000000000], [0.5000000000000000, 0.5000000000000000, 0.0000000000000000], 
[0.5000000000000000, 0.0000000000000000, 0.0000000000000000], [0.0000000000000000, 0.5000000000000000, 0.0000000000000000], [0.0000000000000000, 0.0000000000000000, 0.5000000000000000], [0.6984197043243866, 0.1005267652252045, 0.1005267652252045], [0.1005267652252045, 0.1005267652252045, 0.1005267652252045], [0.1005267652252045, 0.1005267652252045, 0.6984197043243866], [0.1005267652252045, 0.6984197043243866, 0.1005267652252045], [0.0568813795204234, 0.3143728734931922, 0.3143728734931922], [0.3143728734931922, 0.3143728734931922, 0.3143728734931922], [0.3143728734931922, 0.3143728734931922, 0.0568813795204234], [0.3143728734931922, 0.0568813795204234, 0.3143728734931922] ]) w = arange(14, dtype=float64) w[0:6] = 0.0190476190476190 w[6:10] = 0.0885898247429807 w[10:14] = 0.1328387466855907 w = w/6.0 elif degree == 5: # Keast rule, 15 points, degree of precision 5 # Values taken from http://people.sc.fsu.edu/~jburkardt/datasets/quadrature_rules_tet/quadrature_rules_tet.html # (KEAST6) x = array([ [0.2500000000000000, 0.2500000000000000, 0.2500000000000000], [0.0000000000000000, 0.3333333333333333, 0.3333333333333333], [0.3333333333333333, 0.3333333333333333, 0.3333333333333333], [0.3333333333333333, 0.3333333333333333, 0.0000000000000000], [0.3333333333333333, 0.0000000000000000, 0.3333333333333333], [0.7272727272727273, 0.0909090909090909, 0.0909090909090909], [0.0909090909090909, 0.0909090909090909, 0.0909090909090909], [0.0909090909090909, 0.0909090909090909, 0.7272727272727273], [0.0909090909090909, 0.7272727272727273, 0.0909090909090909], [0.4334498464263357, 0.0665501535736643, 0.0665501535736643], [0.0665501535736643, 0.4334498464263357, 0.0665501535736643], [0.0665501535736643, 0.0665501535736643, 0.4334498464263357], [0.0665501535736643, 0.4334498464263357, 0.4334498464263357], [0.4334498464263357, 0.0665501535736643, 0.4334498464263357], [0.4334498464263357, 0.4334498464263357, 0.0665501535736643] ]) w = arange(15, dtype=float64) w[0] = 0.1817020685825351 
w[1:5] = 0.0361607142857143 w[5:9] = 0.0698714945161738 w[9:15] = 0.0656948493683187 w = w/6.0 elif degree == 6: # Keast rule, 24 points, degree of precision 6 # Values taken from http://people.sc.fsu.edu/~jburkardt/datasets/quadrature_rules_tet/quadrature_rules_tet.html # (KEAST7) x = array([ [0.3561913862225449, 0.2146028712591517, 0.2146028712591517], [0.2146028712591517, 0.2146028712591517, 0.2146028712591517], [0.2146028712591517, 0.2146028712591517, 0.3561913862225449], [0.2146028712591517, 0.3561913862225449, 0.2146028712591517], [0.8779781243961660, 0.0406739585346113, 0.0406739585346113], [0.0406739585346113, 0.0406739585346113, 0.0406739585346113], [0.0406739585346113, 0.0406739585346113, 0.8779781243961660], [0.0406739585346113, 0.8779781243961660, 0.0406739585346113], [0.0329863295731731, 0.3223378901422757, 0.3223378901422757], [0.3223378901422757, 0.3223378901422757, 0.3223378901422757], [0.3223378901422757, 0.3223378901422757, 0.0329863295731731], [0.3223378901422757, 0.0329863295731731, 0.3223378901422757], [0.2696723314583159, 0.0636610018750175, 0.0636610018750175], [0.0636610018750175, 0.2696723314583159, 0.0636610018750175], [0.0636610018750175, 0.0636610018750175, 0.2696723314583159], [0.6030056647916491, 0.0636610018750175, 0.0636610018750175], [0.0636610018750175, 0.6030056647916491, 0.0636610018750175], [0.0636610018750175, 0.0636610018750175, 0.6030056647916491], [0.0636610018750175, 0.2696723314583159, 0.6030056647916491], [0.2696723314583159, 0.6030056647916491, 0.0636610018750175], [0.6030056647916491, 0.0636610018750175, 0.2696723314583159], [0.0636610018750175, 0.6030056647916491, 0.2696723314583159], [0.2696723314583159, 0.0636610018750175, 0.6030056647916491], [0.6030056647916491, 0.2696723314583159, 0.0636610018750175] ]) w = arange(24, dtype=float64) w[0:4] = 0.0399227502581679 w[4:8] = 0.0100772110553207 w[8:12] = 0.0553571815436544 w[12:24] = 0.0482142857142857 w = w/6.0 else: # Get canonical scheme x, w = 
_fiat_scheme("tetrahedron", degree) # Return scheme return x, w ffc-1.6.0/ffc/quadratureelement.py000066400000000000000000000127551255571034100171300ustar00rootroot00000000000000# Copyright (C) 2007-2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Garth N. Wells 2006-2009 # # First added: 2007-12-10 # Last changed: 2010-01-30 # Python modules. import numpy # FIAT modules. from FIAT.functional import PointEvaluation # FFC modules. from .log import error, info_red # Default quadrature element degree default_quadrature_degree = 1 default_quadrature_scheme = "canonical" class QuadratureElement: """Write description of QuadratureElement""" def __init__(self, ufl_element): "Create QuadratureElement" # Compute number of points per axis from the degree of the element degree = ufl_element.degree() if degree is None: degree = default_quadrature_degree scheme = ufl_element.quadrature_scheme() if scheme is None: scheme = default_quadrature_scheme self._quad_scheme = scheme # Create quadrature (only interested in points) # TODO: KBO: What should we do about quadrature functions that live on ds, dS? # Get cell and facet names. 
domain, = ufl_element.domains() # Assuming single domain cellname = domain.cell().cellname() #facet_cellname = domain.cell().facet_cellname() points, weights = create_quadrature(cellname, degree, self._quad_scheme) # Save the quadrature points self._points = points # Create entity dofs. ufc_cell = reference_cell(cellname) self._entity_dofs = _create_entity_dofs(ufc_cell, len(points)) # The dual is a simply the PointEvaluation at the quadrature points # FIXME: KBO: Check if this gives expected results for code like evaluate_dof. self._dual = [PointEvaluation(ufc_cell, tuple(point)) for point in points] # Save the geometric dimension. # FIXME: KBO: Do we need to change this in order to integrate on facets? # MSA: Not the geometric dimension, but maybe the topological dimension somewhere? self._geometric_dimension = domain.geometric_dimension() def space_dimension(self): "The element space dimension is simply the number of quadrature points" return len(self._points) def value_shape(self): "The QuadratureElement is scalar valued" return () def entity_dofs(self): "Entity dofs are like that of DG, all internal to the cell" return self._entity_dofs def mapping(self): "The mapping is not really affine, but it is easier to handle the code generation this way." 
return ["affine"]*self.space_dimension() def dual_basis(self): "Return list of PointEvaluations" return self._dual def tabulate(self, order, points): """Return the identity matrix of size (num_quad_points, num_quad_points), in a format that monomialintegration and monomialtabulation understands.""" # Derivatives are not defined on a QuadratureElement # FIXME: currently this check results in a warning (should be RuntimeError) # because otherwise some forms fails if QuadratureElement is used in a # mixed element e.g., # element = CG + QuadratureElement # (v, w) = BasisFunctions(element) # grad(w): this is in error and should result in a runtime error # grad(v): this should be OK, but currently will raise a warning because # derivatives are tabulated for ALL elements in the mixed element. # This issue should be fixed in UFL and then we can switch on the # RuntimeError again. if order: # error("Derivatives are not defined on a QuadratureElement") info_red("\n*** WARNING: Derivatives are not defined on a QuadratureElement,") info_red(" returning values of basisfunction.\n") # Check that incoming points are equal to the quadrature points. if len(points) != len(self._points) or abs(numpy.array(points) - self._points).max() > 1e-12: print("\npoints:\n", numpy.array(points)) print("\nquad points:\n", self._points) error("Points must be equal to coordinates of quadrature points") # Return the identity matrix of size len(self._points) in a # suitable format for tensor and quadrature representations. 
values = numpy.eye(len(self._points)) return {(0,)*self._geometric_dimension: values} def _create_entity_dofs(fiat_cell, num_dofs): "This function is ripped from FIAT/discontinuous_lagrange.py" entity_dofs = {} top = fiat_cell.get_topology() for dim in sorted( top ): entity_dofs[dim] = {} for entity in sorted( top[dim] ): entity_dofs[dim][entity]=[] entity_dofs[dim][0] = list(range(num_dofs)) return entity_dofs # FFC modules to avoid circular import from ffc.fiatinterface import reference_cell from ffc.quadrature_schemes import create_quadrature ffc-1.6.0/ffc/representation.py000066400000000000000000000635561255571034100164500ustar00rootroot00000000000000""" Compiler stage 2: Code representation ------------------------------------- This module computes intermediate representations of forms, elements and dofmaps. For each UFC function, we extract the data needed for code generation at a later stage. The representation should conform strictly to the naming and order of functions in UFC. Thus, for code generation of the function "foo", one should only need to use the data stored in the intermediate representation under the key "foo". """ # Copyright (C) 2009-2015 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Marie E. Rognes 2010 # Modified by Kristian B. 
Oelgaard 2010 # Modified by Martin Alnaes, 2013-2015 # Modified by Lizao Li 2015 # Python modules from itertools import chain # Import UFL import ufl # FFC modules from ffc.utils import compute_permutations, product from ffc.log import info, error, begin, end, debug_ir, ffc_assert, warning from ffc.fiatinterface import create_element, reference_cell from ffc.mixedelement import MixedElement from ffc.enrichedelement import EnrichedElement, SpaceOfReals from ffc.fiatinterface import DiscontinuousLagrangeTrace from ffc.quadratureelement import QuadratureElement from ffc.cpp import set_float_formatting # List of supported integral types ufc_integral_types = ["cell", "exterior_facet", "interior_facet", "vertex", "custom"] def pick_representation(representation): "Return one of the specialized code generation modules from a representation string." if representation == "quadrature": from ffc import quadrature r = quadrature elif representation == "tensor": from ffc import tensor r = tensor elif representation == "uflacs": from ffc import uflacsrepr r = uflacsrepr else: error("Unknown representation: %s" % str(representation)) return r not_implemented = None def compute_ir(analysis, parameters): "Compute intermediate representation." 
begin("Compiler stage 2: Computing intermediate representation") # Set code generation parameters set_float_formatting(int(parameters["precision"])) # Extract data from analysis form_datas, elements, element_numbers = analysis # Compute representation of elements info("Computing representation of %d elements" % len(elements)) ir_elements = [_compute_element_ir(e, i, element_numbers) \ for (i, e) in enumerate(elements)] # Compute representation of dofmaps info("Computing representation of %d dofmaps" % len(elements)) ir_dofmaps = [_compute_dofmap_ir(e, i, element_numbers) for (i, e) in enumerate(elements)] # Compute and flatten representation of integrals info("Computing representation of integrals") irs = [_compute_integral_ir(fd, i, element_numbers, parameters) \ for (i, fd) in enumerate(form_datas)] ir_integrals = [ir for ir in chain(*irs) if not ir is None] # Compute representation of forms info("Computing representation of forms") ir_forms = [_compute_form_ir(fd, i, element_numbers) \ for (i, fd) in enumerate(form_datas)] end() return ir_elements, ir_dofmaps, ir_integrals, ir_forms def _compute_element_ir(ufl_element, element_id, element_numbers): "Compute intermediate representation of element." 
# Create FIAT element element = create_element(ufl_element) domain, = ufl_element.domains() # Assuming single domain cellname = domain.cell().cellname() # Store id ir = {"id": element_id} # Compute data for each function ir["signature"] = ufl_element.reconstruction_signature() ir["cell_shape"] = cellname ir["topological_dimension"] = domain.topological_dimension() ir["geometric_dimension"] = domain.geometric_dimension() ir["space_dimension"] = element.space_dimension() ir["value_rank"] = len(ufl_element.value_shape()) ir["value_dimension"] = ufl_element.value_shape() ir["evaluate_basis"] = _evaluate_basis(ufl_element, element) ir["evaluate_dof"] = _evaluate_dof(ufl_element, element) ir["interpolate_vertex_values"] = _interpolate_vertex_values(ufl_element, element) ir["num_sub_elements"] = ufl_element.num_sub_elements() ir["create_sub_element"] = _create_sub_foo(ufl_element, element_numbers) #debug_ir(ir, "finite_element") return ir def _compute_dofmap_ir(ufl_element, element_id, element_numbers): "Compute intermediate representation of dofmap." 
# Create FIAT element element = create_element(ufl_element) domain, = ufl_element.domains() # Assuming single domain cell = domain.cell() cellname = cell.cellname() # Precompute repeatedly used items num_dofs_per_entity = _num_dofs_per_entity(element) facet_dofs = _tabulate_facet_dofs(element, cell) # Store id ir = {"id": element_id} # Compute data for each function ir["signature"] = "FFC dofmap for " + ufl_element.reconstruction_signature() ir["needs_mesh_entities"] = _needs_mesh_entities(element) ir["topological_dimension"] = domain.topological_dimension() ir["geometric_dimension"] = domain.geometric_dimension() ir["global_dimension"] = _global_dimension(element) ir["num_element_dofs"] = element.space_dimension() ir["num_facet_dofs"] = len(facet_dofs[0]) ir["num_entity_dofs"] = num_dofs_per_entity ir["tabulate_dofs"] = _tabulate_dofs(element, cell) ir["tabulate_facet_dofs"] = facet_dofs ir["tabulate_entity_dofs"] = (element.entity_dofs(), num_dofs_per_entity) ir["tabulate_coordinates"] = _tabulate_coordinates(ufl_element, element) ir["num_sub_dofmaps"] = ufl_element.num_sub_elements() ir["create_sub_dofmap"] = _create_sub_foo(ufl_element, element_numbers) #debug_ir(ir, "dofmap") return ir def _global_dimension(element): "Compute intermediate representation for global_dimension." if not isinstance(element, MixedElement): if isinstance(element, SpaceOfReals): return ([], 1) return (_num_dofs_per_entity(element), 0) elements = [] reals = [] num_reals = 0 for (i, e) in enumerate(element.elements()): if not isinstance(e, SpaceOfReals): elements += [e] else: num_reals += 1 element = MixedElement(elements) return (_num_dofs_per_entity(element), num_reals) def _needs_mesh_entities(element): "Compute intermediate representation for needs_mesh_entities." 
# Note: The dof map for Real elements does not depend on the mesh num_dofs_per_entity = _num_dofs_per_entity(element) if isinstance(element, SpaceOfReals): return [False for d in num_dofs_per_entity] else: return [d > 0 for d in num_dofs_per_entity] def _compute_integral_ir(form_data, form_id, element_numbers, parameters): "Compute intermediate represention for form integrals." irs = [] # Iterate over integrals for itg_data in form_data.integral_data: # Select representation # TODO: Is it possible to detach this metadata from IntegralData? It's a bit strange from the ufl side. r = pick_representation(itg_data.metadata["representation"]) # Compute representation ir = r.compute_integral_ir(itg_data, form_data, form_id, element_numbers, parameters) # Append representation irs.append(ir) return irs def _compute_form_ir(form_data, form_id, element_numbers): "Compute intermediate representation of form." # Store id ir = {"id": form_id} # Compute common data ir["classname"] = "FooForm" ir["members"] = not_implemented ir["constructor"] = not_implemented ir["destructor"] = not_implemented ir["signature"] = form_data.original_form.signature() ir["rank"] = len(form_data.original_form.arguments()) ir["num_coefficients"] = len(form_data.reduced_coefficients) ir["original_coefficient_positions"] = form_data.original_coefficient_positions ir["create_finite_element"] = [element_numbers[e] for e in form_data.elements] ir["create_dofmap"] = [element_numbers[e] for e in form_data.elements] for integral_type in ufc_integral_types: ir["max_%s_subdomain_id" % integral_type] = _max_foo_subdomain_id(integral_type, form_data) ir["has_%s_integrals" % integral_type] = _has_foo_integrals(integral_type, form_data) ir["create_%s_integral" % integral_type] = _create_foo_integral(integral_type, form_data) ir["create_default_%s_integral" % integral_type] = _create_default_foo_integral(integral_type, form_data) return ir #--- Computation of intermediate representation for non-trivial functions --- 
# FIXME: Move to FiniteElement/MixedElement def _value_size(element): """Compute value size of element, aka the number of components. The value size of a scalar field is 1, the value size of a vector field (is the number of components), the value size of a higher dimensional tensor field is the product of the value_shape of the field. Recall that all mixed elements are flattened. """ shape = element.value_shape() if shape == (): return 1 return product(shape) def _generate_reference_offsets(element, offset=0): """Generate offsets: i.e value offset for each basis function relative to a reference element representation.""" offsets = [] if isinstance(element, MixedElement): for e in element.elements(): offsets += _generate_reference_offsets(e, offset) offset += _value_size(e) elif isinstance(element, EnrichedElement): for e in element.elements(): offsets += _generate_reference_offsets(e, offset) else: offsets = [offset]*element.space_dimension() return offsets def _generate_physical_offsets(ufl_element, offset=0): """Generate offsets: i.e value offset for each basis function relative to a physical element representation.""" offsets = [] # Refer to reference if gdim == tdim. 
This is a hack to support # more stuff (in particular restricted elements) domain, = ufl_element.domains() # Assuming single domain gdim = domain.geometric_dimension() tdim = domain.topological_dimension() if (gdim == tdim): return _generate_reference_offsets(create_element(ufl_element)) if isinstance(ufl_element, ufl.MixedElement): for e in ufl_element.sub_elements(): offsets += _generate_physical_offsets(e, offset) offset += _value_size(e) elif isinstance(ufl_element, ufl.EnrichedElement): for e in ufl_element._elements: offsets += _generate_physical_offsets(e, offset) elif isinstance(ufl_element, ufl.FiniteElement): element = create_element(ufl_element) offsets = [offset]*element.space_dimension() else: raise NotImplementedError("This element combination is not implemented") return offsets def _evaluate_dof(ufl_element, element): "Compute intermediate representation of evaluate_dof." # With regard to reference_value_size vs physical_value_size: Note # that 'element' is the FFC/FIAT representation of the finite # element, while 'ufl_element' is the UFL representation. In # particular, UFL only knows about physical dimensions, so the # value shape of the 'ufl_element' (which is used to compute the # _value_size) will be correspond to the value size in physical # space. FIAT however only knows about the reference element, and # so the FIAT value shape of the 'element' will be the reference # value size. This of course only matters for elements that have # different physical and reference value shapes and sizes. 
domain, = ufl_element.domains() # Assuming single domain return {"mappings": element.mapping(), "reference_value_size": _value_size(element), "physical_value_size": _value_size(ufl_element), "geometric_dimension": domain.geometric_dimension(), "topological_dimension": domain.topological_dimension(), "dofs": [L.pt_dict if L else None for L in element.dual_basis()], "physical_offsets": _generate_physical_offsets(ufl_element)} def _extract_elements(element): new_elements = [] if isinstance(element, (MixedElement, EnrichedElement)): for e in element.elements(): new_elements += _extract_elements(e) else: new_elements.append(element) return new_elements # def _num_components(element): # """Compute the number of components of element, like _value_size, but # does not support tensor elements.""" # shape = element.value_shape() # if shape == (): # return 1 # elif len(shape) == 1: # return shape[0] # else: # error("Tensor valued elements are not supported yet: %d " % shape) def _evaluate_basis(ufl_element, element): "Compute intermediate representation for evaluate_basis." domain, = ufl_element.domains() # Assuming single domain cellname = domain.cell().cellname() # Handle Mixed and EnrichedElements by extracting 'sub' elements. elements = _extract_elements(element) offsets = _generate_reference_offsets(element) # Must check? mappings = element.mapping() # This function is evidently not implemented for TensorElements for e in elements: if (len(e.value_shape()) > 1) and (e.num_sub_elements() != 1): return "Function not supported/implemented for TensorElements." # Handle QuadratureElement, not supported because the basis is only defined # at the dof coordinates where the value is 1, so not very interesting. for e in elements: if isinstance(e, QuadratureElement): return "Function not supported/implemented for QuadratureElement." if isinstance(e, DiscontinuousLagrangeTrace): return "Function not supported for Trace elements" # Initialise data with 'global' values. 
data = {"reference_value_size": _value_size(element), "physical_value_size": _value_size(ufl_element), "cellname" : cellname, "topological_dimension" : domain.topological_dimension(), "geometric_dimension" : domain.geometric_dimension(), "space_dimension" : element.space_dimension(), "needs_oriented": needs_oriented_jacobian(element), "max_degree": max([e.degree() for e in elements]) } # Loop element and space dimensions to generate dof data. dof = 0 dof_data = [] for e in elements: for i in range(e.space_dimension()): num_components = _value_size(e) coefficients = [] coeffs = e.get_coeffs() if (num_components > 1) and (len(e.value_shape()) == 1): # Handle coefficients for vector valued basis elements # [Raviart-Thomas, Brezzi-Douglas-Marini (BDM)]. for c in range(num_components): coefficients.append(coeffs[i][c]) elif (num_components > 1) and (len(e.value_shape()) == 2): # Handle coefficients for tensor valued basis elements. # [Regge] for p in range(e.value_shape()[0]): for q in range(e.value_shape()[1]): coefficients.append(coeffs[i][p][q]) else: coefficients.append(coeffs[i]) dof_data.append( { "embedded_degree" : e.degree(), "coeffs" : coefficients, "num_components" : num_components, "dmats" : e.dmats(), "mapping" : mappings[dof], "offset" : offsets[dof], "num_expansion_members": e.get_num_members(e.degree()) }) dof += 1 data["dof_data"] = dof_data return data def _tabulate_coordinates(ufl_element, element): "Compute intermediate representation of tabulate_coordinates." if uses_integral_moments(element) or not element.dual_basis()[0]: return {} domain, = ufl_element.domains() # Assuming single domain data = {} data["tdim"] = domain.topological_dimension() data["gdim"] = domain.geometric_dimension() data["points"] = [sorted(L.pt_dict.keys())[0] for L in element.dual_basis()] return data def _tabulate_dofs(element, cell): "Compute intermediate representation of tabulate_dofs." 
if isinstance(element, SpaceOfReals): return None # Extract number of entities for each dimension for this cell num_entities = cell.num_entities() # Extract number of dofs per entity for each element elements = all_elements(element) num_dofs_per_element = [_num_dofs_per_entity(e) for e in elements] # Extract local dof numbers per entity for each element all_entity_dofs = [e.entity_dofs() for e in elements] dofs_per_element = [[[list(dofs[dim][entity]) for entity in sorted(dofs[dim].keys())] for dim in sorted(dofs.keys())] for dofs in all_entity_dofs] # Check whether we need offset multiple_entities = any([sum(n > 0 for n in num_dofs) - 1 for num_dofs in num_dofs_per_element]) need_offset = len(elements) > 1 or multiple_entities num_dofs_per_element = [e.space_dimension() for e in elements] # Handle global "elements" fakes = [isinstance(e, SpaceOfReals) for e in elements] return (dofs_per_element, num_dofs_per_element, num_entities, need_offset, fakes) def _tabulate_facet_dofs(element, cell): "Compute intermediate representation of tabulate_facet_dofs." # Compute incidences incidence = __compute_incidence(cell.topological_dimension()) # Get topological dimension D = max([pair[0][0] for pair in incidence]) # Get the number of facets num_facets = cell.num_facets() # Find out which entities are incident to each facet incident = num_facets*[None] for facet in range(num_facets): incident[facet] = [pair[1] for pair in incidence if incidence[pair] == True and pair[0] == (D - 1, facet)] # Make list of dofs facet_dofs = [] entity_dofs = element.entity_dofs() for facet in range(num_facets): facet_dofs += [[]] for dim in entity_dofs: for entity in entity_dofs[dim]: if (dim, entity) in incident[facet]: facet_dofs[facet] += entity_dofs[dim][entity] facet_dofs[facet].sort() return facet_dofs def _interpolate_vertex_values(ufl_element, element): "Compute intermediate representation of interpolate_vertex_values." 
# Check for QuadratureElement for e in all_elements(element): if isinstance(e, QuadratureElement): return "Function is not supported/implemented for QuadratureElement." if isinstance(e, DiscontinuousLagrangeTrace): return "Function is not implemented for DiscontinuousLagrangeTrace." domain, = ufl_element.domains() # Assuming single domain cellname = domain.cell().cellname() ir = {} ir["geometric_dimension"] = domain.geometric_dimension() ir["topological_dimension"] = domain.topological_dimension() # Check whether computing the Jacobian is necessary mappings = element.mapping() ir["needs_jacobian"] = any("piola" in m for m in mappings) or any("pullback as metric" in m for m in mappings) ir["needs_oriented"] = needs_oriented_jacobian(element) # See note in _evaluate_dofs ir["reference_value_size"] = _value_size(element) ir["physical_value_size"] = _value_size(ufl_element) # Get vertices of reference cell fiat_cell = reference_cell(cellname) vertices = fiat_cell.get_vertices() # Compute data for each constituent element extract = lambda values: values[sorted(values.keys())[0]].transpose() all_fiat_elm = all_elements(element) ir["element_data"] = [{ # See note in _evaluate_dofs "reference_value_size": _value_size(e), "physical_value_size": _value_size(e), # FIXME: Get from corresponding ufl element "basis_values": extract(e.tabulate(0, vertices)), "mapping": e.mapping()[0], "space_dim": e.space_dimension()} for e in all_fiat_elm] # FIXME: Temporary hack! if len(ir["element_data"]) == 1: ir["element_data"][0]["physical_value_size"] = ir["physical_value_size"] # Consistency check, related to note in _evaluate_dofs # This will fail for e.g. (RT1 x DG0) on a manifold if sum(data["physical_value_size"] for data in ir["element_data"]) != ir["physical_value_size"]: ir = "Failed to set physical value size correctly for subelements." 
elif sum(data["reference_value_size"] for data in ir["element_data"]) != ir["reference_value_size"]: ir = "Failed to set reference value size correctly for subelements." return ir def _create_sub_foo(ufl_element, element_numbers): "Compute intermediate representation of create_sub_element/dofmap." return [element_numbers[e] for e in ufl_element.sub_elements()] def _create_foo_integral(integral_type, form_data): "Compute intermediate representation of create_foo_integral." return [itg_data.subdomain_id for itg_data in form_data.integral_data if itg_data.integral_type == integral_type and isinstance(itg_data.subdomain_id, int)] def _max_foo_subdomain_id(integral_type, form_data): "Compute intermediate representation of max_foo_subdomain_id." return form_data.num_sub_domains.get(integral_type, 0) # TODO: Rename in form_data def _has_foo_integrals(integral_type, form_data): "Compute intermediate representation of has_foo_integrals." v = (form_data.num_sub_domains.get(integral_type,0) > 0 or _create_default_foo_integral(integral_type, form_data) is not None) return bool(v) def _create_default_foo_integral(integral_type, form_data): "Compute intermediate representation of create_default_foo_integral." itg_data = [itg_data for itg_data in form_data.integral_data if (itg_data.subdomain_id == "otherwise" and itg_data.integral_type == integral_type)] ffc_assert(len(itg_data) in (0,1), "Expecting at most one default integral of each type.") return "otherwise" if itg_data else None #--- Utility functions --- # FIXME: KBO: This could go somewhere else, like in UFL? # MSA: There is probably something related in ufl somewhere, # but I don't understand quite what this does. # In particular it does not cover sub-sub-elements? Is that a bug? # Also look at function naming, use single '_' for utility functions. 
def all_elements(element):
    "Return the sub-elements of a mixed element, or [element] itself."
    if isinstance(element, MixedElement):
        return element.elements()
    return [element]

def _num_dofs_per_entity(element):
    """
    Compute list of integers representing the number of dofs
    associated with a single mesh entity.

    Example: Lagrange of degree 3 on triangle: [1, 2, 1]
    """
    entity_dofs = element.entity_dofs()
    # entity_dofs maps dimension -> entity -> dof list; all entities of a
    # given dimension carry the same number of dofs, so entity 0 suffices.
    # (len(d.keys()) simplified to len(d).)
    return [len(entity_dofs[e][0]) for e in range(len(entity_dofs))]

# These two are copied from old ffc
def __compute_incidence(D):
    "Compute which entities are incident with which"

    # Compute the incident vertices for each entity
    sub_simplices = []
    for dim in range(D + 1):
        sub_simplices += [__compute_sub_simplices(D, dim)]

    # Check which entities are incident, d0 --> d1 for d0 >= d1
    incidence = {}
    for d0 in range(0, D + 1):
        for i0 in range(len(sub_simplices[d0])):
            for d1 in range(d0 + 1):
                for i1 in range(len(sub_simplices[d1])):
                    # (d0, i0) is incident to (d1, i1) iff every vertex of
                    # the latter is a vertex of the former.
                    # (Replaces the old min([...]) == True idiom.)
                    incidence[((d0, i0), (d1, i1))] = \
                        all(v in sub_simplices[d0][i0]
                            for v in sub_simplices[d1][i1])

    return incidence

def __compute_sub_simplices(D, d):
    """Compute vertices for all sub simplices of dimension d (code
    taken from Exterior)."""

    # Number of vertices
    num_vertices = D + 1

    # Special cases: d = 0 and d = D
    if d == 0:
        return [[i] for i in range(num_vertices)]
    elif d == D:
        return [list(range(num_vertices))]

    # Compute all permutations of num_vertices - (d + 1)
    permutations = compute_permutations(num_vertices - d - 1, num_vertices)

    # Iterate over sub simplices
    sub_simplices = []
    for i in range(len(permutations)):

        # Pick tuple i among permutations (non-incident vertices)
        remove = permutations[i]

        # Remove vertices, keeping d + 1 vertices
        vertices = [v for v in range(num_vertices) if v not in remove]
        sub_simplices += [vertices]

    return sub_simplices

def uses_integral_moments(element):
    "True if element uses integral moments for its degrees of freedom."
integrals = set(["IntegralMoment", "FrobeniusIntegralMoment"]) tags = set([L.get_type_tag() for L in element.dual_basis() if L]) return len(integrals & tags) > 0 def needs_oriented_jacobian(element): # Check whether this element needs an oriented jacobian # (only contravariant piolas and pullback as metric seem to need it) return ("contravariant piola" in element.mapping() or "pullback as metric" in element.mapping()) ffc-1.6.0/ffc/representationutils.py000066400000000000000000000156321255571034100175210ustar00rootroot00000000000000"""This module contains utility functions for some code shared between quadrature and tensor representation.""" # Copyright (C) 2012-2015 Marie Rognes # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Martin Alnaes 2013-2015 # Modified by Anders Logg 2014 from ufl.measure import integral_type_to_measure_name from ffc.fiatinterface import create_element from ffc.cpp import format from ffc.log import error def transform_component(component, offset, ufl_element): """ This function accounts for the fact that if the geometrical and topological dimension does not match, then for native vector elements, in particular the Piola-mapped ones, the physical value dimensions and the reference value dimensions are not the same. This has certain consequences for mixed elements, aka 'fun with offsets'. 
""" # This code is used for tensor/monomialtransformation.py and # quadrature/quadraturetransformerbase.py. domain, = ufl_element.domains() # Assuming single domain gdim = domain.geometric_dimension() tdim = domain.topological_dimension() # Do nothing if we are not in a special case: The special cases # occur if we have piola mapped elements (for which value_shape != # ()), and if gdim != tdim) if gdim == tdim: return component, offset all_mappings = create_element(ufl_element).mapping() special_case = (any(['piola' in m for m in all_mappings]) and ufl_element.num_sub_elements() > 1) if not special_case: return component, offset # Extract lists of reference and physical value dimensions by # sub-element reference_value_dims = [] physical_value_dims = [] for sub_element in ufl_element.sub_elements(): assert (len(sub_element.value_shape()) < 2), \ "Vector-valued assumption failed" if sub_element.value_shape() == (): reference_value_dims += [1] physical_value_dims += [1] else: reference_value_dims += [sub_element.value_shape()[0] - (gdim - tdim)] physical_value_dims += [sub_element.value_shape()[0]] # Figure out which sub-element number 'component' is in, # 'sub_element_number' contains the result tot = physical_value_dims[0] for sub_element_number in range(len(physical_value_dims)): if component < tot: break else: tot += physical_value_dims[sub_element_number+1] # Compute the new reference offset: reference_offset = sum(reference_value_dims[:sub_element_number]) physical_offset = sum(physical_value_dims[:sub_element_number]) shift = physical_offset - reference_offset # Compute the component relative to the reference frame reference_component = component - shift return reference_component, reference_offset def needs_oriented_jacobian(form_data): # Check whether this form needs an oriented jacobian (only forms # involgin contravariant piola mappings seem to need it) for ufl_element in form_data.unique_elements: element = create_element(ufl_element) if "contravariant 
piola" in element.mapping(): return True return False def initialize_integral_ir(representation, itg_data, form_data, form_id): """Initialize a representation dict with common information that is expected independently of which representation is chosen.""" # Mapping from recognized domain types to entity types entity_type = {"cell": "cell", "exterior_facet": "facet", "interior_facet": "facet", "vertex": "vertex", #"point": "vertex", # TODO: Not sure, clarify here what 'entity_type' refers to? "custom": "cell", #"overlap": "cell", #"interface": "cell", # FIXME: set this to the same as "custom" but not sure here... #"cutcell": "cell", }[itg_data.integral_type] # Extract data cell = itg_data.domain.cell() cellname = cell.cellname() tdim = cell.topological_dimension() assert all(tdim == itg.domain().topological_dimension() for itg in itg_data.integrals) # Set number of cells if not set TODO: Get automatically from number of domains num_cells = itg_data.metadata.get("num_cells") return {"representation": representation, "integral_type": itg_data.integral_type, "subdomain_id": itg_data.subdomain_id, "form_id": form_id, "rank": form_data.rank, "geometric_dimension": form_data.geometric_dimension, "topological_dimension": tdim, "entitytype": entity_type, "num_facets": cell.num_facets(), "num_vertices": cell.num_vertices(), "needs_oriented": needs_oriented_jacobian(form_data), "num_cells": num_cells, "enabled_coefficients": itg_data.enabled_coefficients, } def generate_enabled_coefficients(enabled_coefficients): # TODO: I don't know how to implement this using the format dict, this will do for now: initializer_list = ", ".join("true" if enabled else "false" for enabled in enabled_coefficients) code = '\n'.join([ "static const std::vector enabled({%s});" % initializer_list, "return enabled;", ]) return code def initialize_integral_code(ir, prefix, parameters): "Representation independent default initialization of code dict for integral from intermediate representation." 
code = {} code["class_type"] = ir["integral_type"] + "_integral" code["restrict"] = parameters["restrict_keyword"] code["classname"] = format["classname " + ir["integral_type"] + "_integral"](prefix, ir["form_id"], ir["subdomain_id"]) code["members"] = "" code["constructor"] = format["do nothing"] code["constructor_arguments"] = "" code["initializer_list"] = "" code["destructor"] = format["do nothing"] code["enabled_coefficients"] = generate_enabled_coefficients(ir["enabled_coefficients"]) #code["additional_includes_set"] = set() #ir["additional_includes_set"] return code ffc-1.6.0/ffc/restrictedelement.py000066400000000000000000000054701255571034100171170ustar00rootroot00000000000000# Copyright (C) 2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Marie Rognes, 2010. import numpy from ufl.utils.sorting import sorted_by_key from ffc.log import error class RestrictedElement: "Create a restriction of a given FIAT element." 
def __init__(self, element, indices, domain): if len(indices) == 0: error("No point in creating empty RestrictedElement.") self._element = element self._indices = indices self._entity_dofs = _extract_entity_dofs(element, indices) self._domain = domain def space_dimension(self): return len(self._indices) def value_shape(self): return self._element.value_shape() def degree(self): return self._element.degree() def entity_dofs(self): return self._entity_dofs def mapping(self): mappings = self._element.mapping() return [mappings[i] for i in self._indices] def dual_basis(self): dual = self._element.dual_basis() return [dual[i] for i in self._indices] def tabulate(self, order, points): result = self._element.tabulate(order, points) extracted = {} for (dtuple, values) in sorted_by_key(result): extracted[dtuple] = numpy.array([values[i] for i in self._indices]) return extracted # Used in evaluate_basis: def get_coeffs(self): coefficients = self._element.get_coeffs() return numpy.array([coefficients[i] for i in self._indices]) def dmats(self): return self._element.dmats() def get_num_members(self, arg): return self._element.get_num_members(arg) def domain(self): return self._domain def _extract_entity_dofs(element, indices): # FIXME: Readability counts entity_dofs = element.entity_dofs() dofs = {} for (dim, entities) in sorted_by_key(entity_dofs): dofs[dim] = {} for (entity, all_dofs) in sorted_by_key(entities): dofs[dim][entity] = [] for index in all_dofs: if index in indices: # print "index = ", index i = indices.index(index) dofs[dim][entity] += [i] return dofs ffc-1.6.0/ffc/tensor/000077500000000000000000000000001255571034100143275ustar00rootroot00000000000000ffc-1.6.0/ffc/tensor/__init__.py000066400000000000000000000002241255571034100164360ustar00rootroot00000000000000from .tensorrepresentation import compute_integral_ir from .tensorgenerator import generate_integral_code from .costestimation import estimate_cost 
ffc-1.6.0/ffc/tensor/costestimation.py000066400000000000000000000040341255571034100177470ustar00rootroot00000000000000# Copyright (C) 2010 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2010-01-25 # Last changed: 2014-04-15 # FFC modules from ffc.log import debug # FFC tensor representation modules from ffc.tensor.monomialextraction import extract_monomial_form from ffc.tensor.monomialtransformation import transform_monomial_form def estimate_cost(integral, function_replace_map): """ Estimate cost of tensor representation for integral. The cost is computed as the sum of the number of coefficients and derivatives, if the integrand can be represented as a monomial, and -1 if not. 
""" # Check that integral type is supported supported = ["cell", "exterior_facet", "interior_facet"] if not integral.integral_type() in supported: return -1 # Extract monomial integrand integrand = integral.integrand() try: monomial_form = extract_monomial_form([integrand], function_replace_map) transform_monomial_form(monomial_form) except Exception as exception: debug("Monomial extraction failed: " + str(exception)) return -1 # Check that we get just one integrand if not len(monomial_form) == 1: error("Expecting just one integrand.") # Compute cost cost = 0 for integrand in monomial_form: for monomial in integrand.monomials: cost = max(cost, len(monomial.coefficients) + len(monomial.transforms)) return cost ffc-1.6.0/ffc/tensor/geometrytensor.py000066400000000000000000000036031255571034100177710ustar00rootroot00000000000000# Copyright (C) 2004-2009 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Marie E. Rognes, 2007 # Modified by Kristian B. Oelgaard, 2009 # # First added: 2004-11-03 # Last changed: 2009-12-21 # FFC modules. from ffc.log import debug # FFC tensor representation modules. from .monomialtransformation import MonomialIndex from .multiindex import create_multiindex class GeometryTensor: """ This class represents the geometry tensor for a monomial term of a multilinear form. """ def __init__(self, monomial): "Create geometry tensor for given monomial." 
# Save monomial data self.determinants = monomial.determinants self.coefficients = monomial.coefficients self.transforms = monomial.transforms # Extract indices secondary_indices = monomial.extract_unique_indices(MonomialIndex.SECONDARY) external_indices = monomial.extract_unique_indices(MonomialIndex.EXTERNAL) # Create multiindices self.secondary_multi_index = create_multiindex(secondary_indices) self.external_multi_index = create_multiindex(external_indices) debug("Secondary multi index: " + str(self.secondary_multi_index)) debug("External multi index: " + str(self.external_multi_index)) ffc-1.6.0/ffc/tensor/monomialextraction.py000066400000000000000000000312401255571034100206150ustar00rootroot00000000000000"Extraction of monomial representations of UFL forms." # Copyright (C) 2008-2013 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Martin Alnaes, 2008, 2013 # Modified by Kristian B. Oelgaard # # First added: 2008-08-01 # Last changed: 2013-01-08 # UFL modules from ufl.classes import Form, Argument, Coefficient, ScalarValue, IntValue from ufl.algorithms import purge_list_tensors, apply_transformer, ReuseTransformer # FFC modules from ffc.log import info, debug, ffc_assert # Cache for computed integrand representations #_cache = {} def extract_monomial_form(integrands, function_replace_map): """ Extract monomial representation of form (if possible). 
When successful, the form is represented as a sum of products of scalar components of basis functions or derivatives of basis functions. If unsuccessful, MonomialException is raised. """ info("Extracting monomial form representation from UFL form") # Iterate over all integrals monomial_form = MonomialForm() for integrand in integrands: # Extract monomial representation if possible monomial_integrand = extract_monomial_integrand(integrand, function_replace_map) monomial_form.append(monomial_integrand) return monomial_form def extract_monomial_integrand(integrand, function_replace_map): "Extract monomial integrand (if possible)." # Check cache #if integrand in _cache: # debug("Reusing monomial integrand from cache") # return _cache[integrand] # Purge list tensors integrand = purge_list_tensors(integrand) # Apply monomial transformer monomial_integrand = apply_transformer(integrand, MonomialTransformer(function_replace_map)) # Store in cache #_cache[integrand] = monomial_integrand return monomial_integrand class MonomialException(Exception): "Exception raised when monomial extraction fails." def __init__(self, *args, **kwargs): Exception.__init__(self, *args, **kwargs) class MonomialFactor: """ This class represents a monomial factor, that is, a derivative of a scalar component of a basis function. """ def __init__(self, arg=None): if isinstance(arg, MonomialFactor): self.function = arg.function self.components = arg.components self.derivatives = arg.derivatives self.restriction = arg.restriction elif isinstance(arg, (Argument, Coefficient)): self.function = arg self.components = [] self.derivatives = [] self.restriction = None if isinstance(arg, Argument) and arg.part() is not None: # Not supported (yet?) 
raise MonomialException("Unable to create monomial from expression: " + str(arg)) elif arg is None: self.function = None self.components = [] self.derivatives = [] self.restriction = None else: raise MonomialException("Unable to create monomial from expression: " + str(arg)) def element(self): return self.function.element() def index(self): if isinstance(self.function, Coefficient): return self.function.count() else: return self.function.number() def apply_derivative(self, indices): self.derivatives += indices def apply_restriction(self, restriction): self.restriction = restriction def replace_indices(self, old_indices, new_indices): if old_indices is None: self.components = new_indices else: _replace_indices(self.components, old_indices, new_indices) _replace_indices(self.derivatives, old_indices, new_indices) def __str__(self): if len(self.components) == 0: c = "" else: c = "[%s]" % ", ".join(str(c) for c in self.components) if len(self.derivatives) == 0: d0 = "" d1 = "" else: d0 = "(" + " ".join("d/dx_%s" % str(d) for d in self.derivatives) + " " d1 = ")" if self.restriction is None: r = "" else: r = "(%s)" % str(self.restriction) return d0 + str(self.function) + r + c + d1 class Monomial: "This class represents a product of monomial factors." 
def __init__(self, arg=None): if isinstance(arg, Monomial): self.float_value = arg.float_value self.factors = [MonomialFactor(v) for v in arg.factors] self.index_slots = arg.index_slots elif isinstance(arg, (MonomialFactor, Argument, Coefficient)): self.float_value = 1.0 self.factors = [MonomialFactor(arg)] self.index_slots = None elif isinstance(arg, ScalarValue): self.float_value = float(arg) self.factors = [] self.index_slots = None elif arg is None: self.float_value = 1.0 self.factors = [] self.index_slots = None else: raise MonomialException("Unable to create monomial from expression: " + str(arg)) def apply_derivative(self, indices): if not len(self.factors) == 1: raise MonomialException("Expecting a single factor.") self.factors[0].apply_derivative(indices) def apply_tensor(self, indices): if not self.index_slots is None: raise MonomialException("Expecting scalar-valued expression.") self.index_slots = indices def apply_indices(self, indices): for v in self.factors: v.replace_indices(self.index_slots, indices) self.index_slots = None def apply_restriction(self, restriction): for v in self.factors: v.apply_restriction(restriction) def __mul__(self, other): m = Monomial() m.float_value = self.float_value * other.float_value m.factors = self.factors + other.factors return m def __str__(self): if self.float_value == 1.0: float_value = "" else: float_value = "%g * " % self.float_value return float_value + " * ".join(str(v) for v in self.factors) class MonomialSum: "This class represents a sum of monomials." 
def __init__(self, arg=None): if isinstance(arg, MonomialSum): self.monomials = [Monomial(m) for m in arg.monomials] elif arg is None: self.monomials = [] else: self.monomials = [Monomial(arg)] def apply_derivative(self, indices): for m in self.monomials: m.apply_derivative(indices) def apply_tensor(self, indices): for m in self.monomials: m.apply_tensor(indices) def apply_indices(self, indices): for m in self.monomials: m.apply_indices(indices) def apply_restriction(self, restriction): for m in self.monomials: m.apply_restriction(restriction) def __add__(self, other): m0 = [Monomial(m) for m in self.monomials] m1 = [Monomial(m) for m in other.monomials] sum = MonomialSum() sum.monomials = m0 + m1 return sum def __mul__(self, other): sum = MonomialSum() for m0 in self.monomials: for m1 in other.monomials: sum.monomials.append(m0 * m1) return sum def __str__(self): return " + ".join(str(m) for m in self.monomials) class MonomialForm: """ This class represents a monomial form, that is, a sum of integrals, each represented as a MonomialSum. """ def __init__(self): self.integrands = [] def append(self, integrand): self.integrands.append(integrand) def __len__(self): return len(self.integrands) def __getitem__(self, i): return self.integrands[i] def __iter__(self): return iter(self.integrands) def __str__(self): if len(self.integrands) == 0: return "" s = "Monomial form of %d integral(s)\n" % len(self.integrands) s += len(s) * "-" + "\n" for integrand in self.integrands: s += "Integrand: " + str(integrand) + "\n" return s class MonomialTransformer(ReuseTransformer): """ This class defines the transformation rules for extraction of a monomial form represented as a MonomialSum from a UFL integral. """ def __init__(self, function_replace_map=None): ReuseTransformer.__init__(self) self._function_replace_map = function_replace_map or {} def expr(self, o, *ops): raise MonomialException("No handler defined for expression %s." 
% o._ufl_class_.__name__) def terminal(self, o): raise MonomialException("No handler defined for terminal %s." % o._ufl_class_.__name__) def variable(self, o): return self.visit(o.expression()) #--- Operator handles --- def division(self, o): # Handle division by scalars as multiplication by inverse denominator = o.ufl_operands[1] if not isinstance(denominator, ScalarValue): raise MonomialException("No handler defined for expression %s." % o._ufl_class_.__name__) inverse = self.scalar_value(ScalarValue(1.0/denominator.value())) numerator = self.visit(o.ufl_operands[0]) return self.product(o, inverse, numerator) def sum(self, o, s0, s1): s = s0 + s1 return s def product(self, o, s0, s1): s = s0 * s1 return s def index_sum(self, o, s, index): return s def indexed(self, o, s, indices): s = MonomialSum(s) s.apply_indices(indices) return s def component_tensor(self, o, s, indices): s = MonomialSum(s) s.apply_tensor(indices) return s def grad(self, o, s): # The representation # o = Grad(s) # is equivalent to # o = as_tensor(s[ii].dx(i), ii+(i,)) # with ii a tuple of free indices and i a single free index. 
# Make some unique utility indices from ufl import indices on = o.rank() ind = list(indices(on)) # Get underlying expression to take gradient of f, = o.ufl_operands fn = f.rank() ffc_assert(on == fn + 1, "Assuming grad always adds one axis.") s = MonomialSum(s) s.apply_indices(list(ind[:-1])) s = MonomialSum(s) s.apply_derivative([ind[-1]]) s = MonomialSum(s) s.apply_tensor(ind) return s def positive_restricted(self, o, s): s.apply_restriction("+") return s def negative_restricted(self, o, s): s.apply_restriction("-") return s def power(self, o, s, ignored_exponent_expressed_as_sum): (expr, exponent) = o.ufl_operands if not isinstance(exponent, IntValue): raise MonomialException("Cannot handle non-integer exponents.") e = int(exponent) if e < 0: raise MonomialException("Cannot handle negative exponents.") p = MonomialSum(Monomial()) for i in range(e): p = p * s return p #--- Terminal handlers --- def multi_index(self, multi_index): indices = [index for index in multi_index] return indices def argument(self, v): s = MonomialSum(self._function_replace_map.get(v,v)) return s def coefficient(self, v): s = MonomialSum(self._function_replace_map.get(v,v)) return s def scalar_value(self, x): s = MonomialSum(x) return s def _replace_indices(indices, old_indices, new_indices): "Handle replacement of subsets of multi indices." # Old and new indices must match if not len(old_indices) == len(new_indices): raise MonomialException("Unable to replace indices, mismatching index dimensions.") # Build index map index_map = {} for (i, index) in enumerate(old_indices): index_map[index] = new_indices[i] # Check all indices and replace for (i, index) in enumerate(indices): if index in old_indices: indices[i] = index_map[index] ffc-1.6.0/ffc/tensor/monomialintegration.py000066400000000000000000000275571255571034100210000ustar00rootroot00000000000000"This module implements efficient integration of monomial forms." # Copyright (C) 2004-2011 Anders Logg # # This file is part of FFC. 
# # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Thanks to Robert C. Kirby for suggesting the initial algorithm that # this implementation is based on. # # Modified by Garth N. Wells, 2006 # Modified by Marie E. Rognes, 2008 # Modified by Kristian B. Oelgaard, 2009 # Python modules import numpy import time # FFC modules from ffc.log import info, debug, error from ffc.fiatinterface import create_element from ffc.fiatinterface import map_facet_points from ffc.quadrature_schemes import create_quadrature # FFC tensor representation modules from .multiindex import build_indices from .monomialextraction import MonomialException from .monomialtransformation import MonomialIndex def integrate(monomial, integral_type, facet0, facet1, quadrature_degree, quadrature_rule, cellname, facet_cellname): """Compute the reference tensor for a given monomial term of a multilinear form""" info("Precomputing integrals on reference element") # Start timing tic = time.time() # Initialize quadrature points and weights (points, weights) = _init_quadrature(monomial.arguments, integral_type, quadrature_degree, quadrature_rule, cellname, facet_cellname) # Initialize quadrature table for basis functions table = _init_table(monomial.arguments, integral_type, points, facet0, facet1) # Compute table Psi for each factor psis = [_compute_psi(v, table, len(points), integral_type) \ for v in monomial.arguments] # Compute product of all Psis A0 = 
_compute_product(psis, monomial.float_value * weights) # Report elapsed time and number of entries toc = time.time() - tic num_entries = numpy.prod(numpy.shape(A0)) debug("%d entries computed in %.3g seconds" % (num_entries, toc)) debug("Shape of reference tensor: " + str(numpy.shape(A0))) return A0 def _init_quadrature(arguments, integral_type, quadrature_degree, quadrature_rule, cellname, facet_cellname): "Initialize quadrature for given monomial." # Create quadrature rule and get points and weights if integral_type == "cell": (points, weights) = create_quadrature(cellname, quadrature_degree, quadrature_rule) else: (points, weights) = create_quadrature(facet_cellname, quadrature_degree, quadrature_rule) return (points, weights) def _init_table(arguments, integral_type, points, facet0, facet1): """Initialize table of basis functions and their derivatives at the given quadrature points for each element.""" # Compute maximum number of derivatives for each element num_derivatives = {} for v in arguments: ufl_element = v.element order = len(v.derivatives) if ufl_element in num_derivatives: num_derivatives[ufl_element] = max(order, num_derivatives[ufl_element]) else: num_derivatives[ufl_element] = order # Call FIAT to tabulate the basis functions for each element table = {} for (ufl_element, order) in num_derivatives.items(): fiat_element = create_element(ufl_element) if integral_type == "cell": table[(ufl_element, None)] = fiat_element.tabulate(order, points) elif integral_type == "exterior_facet": x = map_facet_points(points, facet0) table[(ufl_element, None)] = fiat_element.tabulate(order, x) elif integral_type == "interior_facet": x0 = map_facet_points(points, facet0) x1 = map_facet_points(points, facet1) table[(ufl_element, "+")] = fiat_element.tabulate(order, x0) table[(ufl_element, "-")] = fiat_element.tabulate(order, x1) return table def _compute_psi(v, table, num_points, integral_type): "Compute the table Psi for the given basis function v." 
# We just need to pick the values for Psi from the table, which is # somewhat tricky since the table created by tabulate_jet() is a # mix of list, dictionary and numpy.array. # # The dimensions of the resulting table are ordered as follows: # # one dimension corresponding to quadrature points # all dimensions corresponding to internal Indices # all dimensions corresponding to primary Indices # all dimensions corresponding to secondary Indices # # All fixed Indices are removed here. The first set of dimensions # corresponding to quadrature points and internal Indices are removed # later when we sum over these dimensions. # Get topological dimension of cell domain, = v.element.domains() # Assuming single domain tdim = domain.topological_dimension() # Get indices and shapes for components if len(v.components) == 0: cindex = [] cshape = [] elif len(v.components) == 1: cindex = [v.components[0]] cshape = [len(v.components[0].index_range)] else: raise MonomialException("Can only handle rank 0 or rank 1 tensors.") # Get indices and shapes for derivatives dindex = [d for d in v.derivatives] dshape = [len(d.index_range) for d in v.derivatives] # Get indices and shapes for basis functions vindex = [v.index] vshape = [len(v.index.index_range)] # Create list of indices that label the dimensions of the tensor Psi indices = cindex + dindex + vindex shapes = cshape + dshape + vshape + [num_points] # Initialize tensor Psi: component, derivatives, basis function, points Psi = numpy.zeros(shapes, dtype = numpy.float) # Iterate over derivative indices dlists = build_indices([index.index_range for index in dindex]) or [[]] if len(cindex) > 0: etable = table[(v.element, v.restriction)] for component in range(len(cindex[0].index_range)): for dlist in dlists: # Translate derivative multiindex to lookup tuple dtuple = _multiindex_to_tuple(dlist, tdim) # Get values from table Psi[component][tuple(dlist)] = \ etable[dtuple][:, cindex[0].index_range[component], :] else: etable = 
table[(v.element, v.restriction)] for dlist in dlists: # Translate derivative multiindex to lookup tuple dtuple = _multiindex_to_tuple(dlist, tdim) # Get values from table Psi[tuple(dlist)] = etable[dtuple] # Rearrange Indices as (fixed, internal, primary, secondary) (rearrangement, num_indices) = _compute_rearrangement(indices) indices = [indices[i] for i in rearrangement] Psi = numpy.transpose(Psi, rearrangement + (len(indices),)) # Remove fixed indices for i in range(num_indices[0]): Psi = Psi[0, ...] indices = [index for index in indices \ if not index.index_type == MonomialIndex.FIXED] # Put quadrature points first rank = Psi.ndim Psi = numpy.transpose(Psi, (rank - 1,) + tuple(range(0, rank - 1))) # Compute internal index positions for current Psi bpart = [i.index_id for i in indices if i.index_type == MonomialIndex.INTERNAL] return (Psi, indices, bpart) def _compute_product(psis, weights): "Compute special product of list of Psis." # The reference tensor is obtained by summing over quadrature # points and internal Indices the outer product of all the Psis # with the first dimension (corresponding to quadrature points) # and all internal dimensions removed. 
# Initialize zero reference tensor (will be rearranged later) (shape, indices) = _compute_shape(psis) A0 = numpy.zeros(shape, dtype= numpy.float) # Initialize list of internal multiindices bshape = _compute_internal_shape(psis) bindices = build_indices([list(range(b)) for b in bshape]) or [[]] # Sum over quadrature points and internal indices num_points = len(weights) for q in range(num_points): for b in bindices: # Compute outer products of subtables for current (q, b) B = weights[q] for (Psi, index, bpart) in psis: B = numpy.multiply.outer(B, Psi[ tuple([q] + [b[i] for i in bpart])]) # Add product to reference tensor numpy.add(A0, B, A0) # Rearrange Indices as (primary, secondary) (rearrangement, num_indices) = _compute_rearrangement(indices) A0 = numpy.transpose(A0, rearrangement) return A0 def _compute_rearrangement(indices): """ Compute rearrangement tuple for given list of Indices, so that the tuple reorders the given list of Indices with fixed, primary, secondary and internal Indices in rising order. """ fixed = _find_indices(indices, MonomialIndex.FIXED) internal = _find_indices(indices, MonomialIndex.INTERNAL) primary = _find_indices(indices, MonomialIndex.PRIMARY) secondary = _find_indices(indices, MonomialIndex.SECONDARY) assert len(fixed + internal + primary + secondary) == len(indices) return (tuple(fixed + internal + primary + secondary), \ (len(fixed), len(internal), len(primary), len(secondary))) def _compute_shape(psis): "Compute shape of reference tensor from given list of tables." shape, indices = [], [] for (Psi, index, bpart) in psis: num_internal = len([0 for i in index if i.index_type == MonomialIndex.INTERNAL]) shape += numpy.shape(Psi)[1 + num_internal:] indices += index[num_internal:] return (shape, indices) def _compute_internal_shape(psis): """ Compute shape for internal indices from given list of tables. Also compute a list of mappings from each table to the internal dimensions associated with that table. 
""" # First find the number of different internal indices (check maximum) bs = [b for (Psi, index, bpart) in psis for b in bpart] if len(bs) == 0: return [] bmax = max(bs) # Find the dimension for each internal index bshape = [0 for i in range(bmax + 1)] for (Psi, index, bpart) in psis: for i in range(len(bpart)): bshape[bpart[i]] = numpy.shape(Psi)[i + 1] # Check that we found the shape for each internal index if 0 in bshape: error("Unable to compute the shape for each internal index.") return bshape def _find_indices(indices, index_type): "Return sorted list of positions for given index type." pos = [i for i in range(len(indices)) if indices[i].index_type == index_type] val = [indices[i].index_id for i in range(len(indices)) if indices[i].index_type == index_type] return [pos[i] for i in numpy.argsort(val)] def _multiindex_to_tuple(dindex, cell_dimension): """ Compute lookup tuple from given derivative multiindex. Necessary since the table we get from FIAT is a dictionary with the tuples as keys. A derivative tuple specifies the number of derivatives in each space dimension, rather than listing the space dimensions for the derivatives. """ dtuple = [0 for i in range(cell_dimension)] for d in dindex: dtuple[d] += 1 return tuple(dtuple) ffc-1.6.0/ffc/tensor/monomialtransformation.py000066400000000000000000000477021255571034100215150ustar00rootroot00000000000000"Transformation of monomial representations of UFL forms." # Copyright (C) 2009 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. 
#
# You should have received a copy of the GNU Lesser General Public License
# along with FFC. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Kristian B. Oelgaard, 2009
# Modified by Marie E. Rognes, 2010
#
# First added: 2009-03-06
# Last changed: 2010-02-17

# UFL modules
from ufl.classes import Argument
from ufl.classes import Coefficient
from ufl.classes import FixedIndex
from ufl.permutation import build_component_numbering

# FFC modules
from ffc.log import info, error, ffc_assert
from ffc.fiatinterface import create_element
from ffc.utils import all_equal
from ffc.representationutils import transform_component

# FFC tensor representation modules
from ffc.tensor.monomialextraction import MonomialForm
from ffc.tensor.monomialextraction import MonomialException

def transform_monomial_form(monomial_form):
    "Transform monomial form to reference element."

    info("Transforming monomial form to reference element")

    # Check that we get a monomial form
    ffc_assert(isinstance(monomial_form, MonomialForm),
               "Expecting a MonomialForm.")

    # Note that we check if each monomial has been transformed before
    # and if so we leave it untouched. This is to prevent repeated
    # transformation (which fails) which may sometimes happen as a
    # result of extracted integrands being cached by the monomial
    # extraction.

    # Transform each integral
    for integrand in monomial_form:
        for (i, monomial) in enumerate(integrand.monomials):
            if not isinstance(monomial, TransformedMonomial):
                integrand.monomials[i] = TransformedMonomial(monomial)

class MonomialIndex:
    """
    This class represents a monomial index. Each index has a type,
    a range and a unique id. Valid index types are listed below.
    """

    FIXED = "fixed"          # Integer index
    PRIMARY = "primary"      # Argument basis function index
    SECONDARY = "secondary"  # Index appearing both inside and outside integral
    INTERNAL = "internal"    # Index appearing only inside integral
    EXTERNAL = "external"    # Index appearing only outside integral

    def __init__(self, index=None, index_type=None, index_range=None, index_id=None):
        "Create index with given type, range and id."
        if isinstance(index, MonomialIndex):
            # Copy constructor: duplicate the given index (fresh range list)
            self.index_type = index.index_type
            self.index_range = [i for i in index.index_range]
            self.index_id = index.index_id
        else:
            self.index_type = index_type
            self.index_range = index_range
            self.index_id = index_id

    def __lt__(self, other):
        "Comparison operator."
        return self.index_id < other.index_id

    def __call__(self, primary=None, secondary=None, internal=None, external=None):
        "Evaluate index at current index list."

        if self.index_type == MonomialIndex.FIXED:
            return self.index_range[0]
        elif self.index_type == MonomialIndex.PRIMARY:
            if not primary:
                error("Missing index values for primary indices.")
            return primary[self.index_id]
        elif self.index_type == MonomialIndex.SECONDARY:
            if not secondary:
                error("Missing index values for secondary indices.")
            return secondary[self.index_id]
        elif self.index_type == MonomialIndex.INTERNAL:
            if not internal:
                error("Missing index values for internal auxiliary indices.")
            return internal[self.index_id]
        elif self.index_type == MonomialIndex.EXTERNAL:
            if not external:
                error("Missing index values for external auxiliary indices.")
            return external[self.index_id]
        else:
            # Bug fix: was str(self.type) which raised AttributeError
            # (the attribute is named index_type, not type).
            error("Unknown index type " + str(self.index_type))

    def __add__(self, offset):
        "Add offset to index range."
        index = MonomialIndex(self)
        index.index_range = [offset + i for i in index.index_range]
        return index

    def __sub__(self, offset):
        "Subtract offset from index range."
        return self + (-offset)

    def __str__(self):
        "Return informal string representation (pretty-print)."
if self.index_type == MonomialIndex.FIXED: return str(self.index_range[0]) elif self.index_type == MonomialIndex.PRIMARY: return "i_" + str(self.index_id) elif self.index_type == MonomialIndex.SECONDARY: return "a_" + str(self.index_id) elif self.index_type == MonomialIndex.INTERNAL: return "g_" + str(self.index_id) elif self.index_type == MonomialIndex.EXTERNAL: return "b_" + str(self.index_id) else: return "?" class MonomialDeterminant: "This class representes a determinant factor in a monomial." def __init__(self, power=None, restriction=None): "Create empty monomial determinant." if power is None: self.power = 0 else: self.power = power self.restriction = restriction def __str__(self): "Return informal string representation (pretty-print)." # FIXME: This pretty-print is plain misleading b/c of the # implicit relic factor |det J| etc. if not self.restriction: r = "" else: r = "(%s)" % self.restriction if self.power == 0: return "|det F'%s|" % r elif self.power == 1: return "|det F'%s| (det F'%s)" % (r, r) else: return "|det F'%s| (det F'%s)^%s" % (r, r, str(self.power)) class MonomialCoefficient: "This class represents a coefficient in a monomial." def __init__(self, index, number): "Create monomial coefficient for given index and number." self.index = index self.number = number def __str__(self): "Return informal string representation (pretty-print)." return "c_" + str(self.index) class MonomialTransform: "This class represents a transform (mapping derivative) in a form." J = "J" JINV = "JINV" def __init__(self, index0, index1, transform_type, restriction, offset): "Create monomial transform." # Set data self.index0 = index0 self.index1 = index1 self.transform_type = transform_type self.restriction = restriction self.offset = offset # Subtract offset for fixed indices. Note that the index subtraction # creates a new index instance. 
This is ok here since a fixed index # does not need to match any other index (being the same instance) # in index summation and index extraction. if index0.index_type is MonomialIndex.FIXED: self.index0 = index0 - offset if index1.index_type is MonomialIndex.FIXED: self.index1 = index1 - offset def __str__(self): "Return informal string representation (pretty-print)." if self.restriction is None: r = "" else: r = "(%s)" % str(self.restriction) if self.transform_type == "J": return "dx_%s/dX_%s%s" % (str(self.index0), str(self.index1), r) else: return "dX_%s/dx_%s%s" % (str(self.index0), str(self.index1), r) class MonomialArgument: """ This class represents a monomial argument, that is, a derivative of a scalar component of a basis function on the reference element. """ def __init__(self, element, index, components, derivatives, restriction): "Create monomial argument." self.element = element self.index = index self.components = components self.derivatives = derivatives self.restriction = restriction def __str__(self): "Return informal string representation (pretty-print)." if len(self.components) == 0: c = "" else: c = "[%s]" % ", ".join(str(c) for c in self.components) if len(self.derivatives) == 0: d0 = "" d1 = "" else: d0 = "(" + " ".join("d/dX_%s" % str(d) for d in self.derivatives) + " " d1 = ")" if self.restriction is None: r = "" else: r = "(%s)" % str(self.restriction) v = "V_" + str(self.index) return d0 + v + r + c + d1 class TransformedMonomial: """ This class represents a monomial form after transformation to the reference element. """ def __init__(self, monomial): "Create transformed monomial from given monomial." 
# Reset monomial data self.float_value = monomial.float_value self.determinants = [] self.coefficients = [] self.transforms = [] self.arguments = [] # Reset index counters _reset_indices() # Initialize index map index_map = {} # Iterate over factors for f in monomial.factors: # Create FIAT element ufl_element = f.element() fiat_element = create_element(ufl_element) # Note nifty aspect here: when gdim != tdim, it might be # (in particular, is for H(div)/H(curl), that the value # dimension is different for the physical and reference # elements. # Get number of components # FIXME: Can't handle tensor-valued elements: vdim = shape[0] shape = ufl_element.value_shape() assert(len(shape) <= 1), \ "MonomialTransformation does not handle tensor-valued elements" if len(shape) == 0: vdim = 1 else: vdim = shape[0] # Extract dimensions sdim = fiat_element.space_dimension() domain, = ufl_element.domains() # Assuming single domain gdim = domain.geometric_dimension() tdim = domain.topological_dimension() # Extract basis function index and coefficients if isinstance(f.function, Argument): vindex = MonomialIndex(index_type=MonomialIndex.PRIMARY, index_range=list(range(sdim)), index_id=f.index()) elif isinstance(f.function, Coefficient): vindex = MonomialIndex(index_range=list(range(sdim))) coefficient = MonomialCoefficient(vindex, f.index()) self.coefficients.append(coefficient) # Extract components components = self._extract_components(f, index_map, vdim) if len(components) > 1: raise MonomialException("Can only handle rank 0 or rank 1 tensors.") # Handle non-affine mappings (Piola) if len(components) > 0: # We can only handle rank 1 elements for now component = components[0] # Get mapping (all need to be equal) mappings = [] for i in component.index_range: (offset, ufl_sub_element) = ufl_element.extract_component(i) fiat_sub_element = create_element(ufl_sub_element) mappings.extend(fiat_sub_element.mapping()) if not all_equal(mappings): raise MonomialException("Mappings differ: " 
+ str(mappings)) mapping = mappings[0] # Get component index relative to its sub element and its sub element (component_index, sub_element) = ufl_element.extract_component(component.index_range[0]) # Get offset if len(component_index) == 0: offset = 0 else: offset = component.index_range[0] - component_index[0] # MER: Need to handle mappings in special ways if gdim # != tdim and some Piolas are present. This could # probably be merged with the offset code above, but I # was not able to wrap my head around the offsets # always referring to component.index_range[0]. if (gdim != tdim): assert len(component.index_range) == 1, \ "Component transform not implemented for this case. Please request this feature." component, offset = transform_component(component.index_range[0], offset, ufl_element) component = MonomialIndex(index_type=MonomialIndex.FIXED, index_range=[component], index_id=None) components = [component, ] # Add transforms where appropriate if mapping == "contravariant piola": # phi(x) = (det J)^{-1} J Phi(X) index0 = component index1 = MonomialIndex(index_range=list(range(tdim))) + offset transform = MonomialTransform(index0, index1, MonomialTransform.J, f.restriction, offset) self.transforms.append(transform) determinant = MonomialDeterminant(power=-1, restriction=f.restriction) self.determinants.append(determinant) components[0] = index1 elif mapping == "covariant piola": # phi(x) = J^{-T} Phi(X) index0 = MonomialIndex(index_range=list(range(tdim))) + offset index1 = component transform = MonomialTransform(index0, index1, MonomialTransform.JINV, f.restriction, offset) self.transforms.append(transform) components[0] = index0 # Extract derivatives / transforms derivatives = [] for d in f.derivatives: index0 = MonomialIndex(index_range=list(range(tdim))) if d in index_map: index1 = index_map[d] elif isinstance(d, FixedIndex): index1 = MonomialIndex(index_type=MonomialIndex.FIXED, index_range=[int(d)], index_id=int(d)) else: index1 = 
MonomialIndex(index_range=list(range(gdim))) index_map[d] = index1 transform = MonomialTransform(index0, index1, MonomialTransform.JINV, f.restriction, 0) self.transforms.append(transform) derivatives.append(index0) # Extract restriction restriction = f.restriction # Create basis function v = MonomialArgument(ufl_element, vindex, components, derivatives, restriction) self.arguments.append(v) # Figure out secondary and auxiliary indices internal_indices = self._extract_internal_indices(None) external_indices = self._extract_external_indices(None) for i in internal_indices + external_indices: # Skip already visited indices if not i.index_type is None: continue # Set index type and id num_internal = len([j for j in internal_indices if j == i]) num_external = len([j for j in external_indices if j == i]) if num_internal == 1 and num_external == 1: i.index_type = MonomialIndex.SECONDARY i.index_id = _next_secondary_index() elif num_internal == 2 and num_external == 0: i.index_type = MonomialIndex.INTERNAL i.index_id = _next_internal_index() elif num_internal == 0 and num_external == 2: i.index_type = MonomialIndex.EXTERNAL i.index_id = _next_external_index() else: raise Exception("Summation index does not appear exactly twice: %s" % str(i)) def extract_unique_indices(self, index_type=None): "Return all unique indices for monomial w.r.t. type and id (not range)." indices = [] for index in self._extract_indices(index_type): if not index in indices: indices.append(index) return indices def _extract_components(self, f, index_map, vdim): "Return list of components." components = [] for c in f.components: if c in index_map: index = index_map[c] elif isinstance(c, FixedIndex): # Map component using component map from UFL. # KBO: Is this the right place to add, and do we only have # scalar components in the tensor representation at this stage # in the representation? 
comp_map, comp_num = build_component_numbering(f.element().value_shape(), f.element().symmetry()) comp = comp_map[(int(c),)] index = MonomialIndex(index_type=MonomialIndex.FIXED, index_range=[comp], index_id=None) else: index = MonomialIndex(index_range=list(range(vdim))) index_map[c] = index components.append(index) return components def _extract_internal_indices(self, index_type=None): "Return list of indices appearing inside integral." indices = [] for v in self.arguments: indices += [v.index] + v.components + v.derivatives return [i for i in indices if i.index_type == index_type] def _extract_external_indices(self, index_type=None): "Return list of indices appearing outside integral." indices = [c.index for c in self.coefficients] + \ [t.index0 for t in self.transforms] + \ [t.index1 for t in self.transforms] return [i for i in indices if i.index_type == index_type] def _extract_indices(self, index_type=None): "Return all indices for monomial." return self._extract_internal_indices(index_type) + \ self._extract_external_indices(index_type) def __str__(self): "Return informal string representation (pretty-print)." factors = [] if not self.float_value == 1.0: factors.append(self.float_value) factors += self.determinants factors += self.coefficients factors += self.transforms return " * ".join([str(f) for f in factors]) + " | " + " * ".join([str(v) for v in self.arguments]) # Index counters _current_secondary_index = 0 _current_internal_index = 0 _current_external_index = 0 def _next_secondary_index(): "Return next available secondary index." global _current_secondary_index _current_secondary_index += 1 return _current_secondary_index - 1 def _next_internal_index(): "Return next available internal index." global _current_internal_index _current_internal_index += 1 return _current_internal_index - 1 def _next_external_index(): "Return next available external index." 
global _current_external_index _current_external_index += 1 return _current_external_index - 1 def _reset_indices(): "Reset all index counters." global _current_secondary_index global _current_internal_index global _current_external_index _current_secondary_index = 0 _current_internal_index = 0 _current_external_index = 0 ffc-1.6.0/ffc/tensor/multiindex.py000066400000000000000000000047451255571034100170750ustar00rootroot00000000000000# Copyright (C) 2004-2009 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Garth N. Wells, 2006 # Modified by Marie E. Rognes, 2007 # Modified by Kristian B. Oelgaard, 2009 # # First added: 2004-11-03 # Last changed: 2009-12-21 # Python modules. import functools import numpy # FFC modules. from ffc.utils import listcopy from ffc.log import error def build_indices(dims): "Create a list of all index combinations." if not dims: return [[]] ranges = listcopy(dims) return functools.reduce(outer_join, ranges, [[]]) def outer_join(a, b): """Let a be a list of lists and b a list. We append each element of b to each list in a and return the resulting list of lists.""" outer = [] for i in range(len(a)): for j in range(len(b)): outer += [a[i] + [b[j]]] return outer def create_multiindex(indices): "Create multiindex for given list of indices." 
# Check that we got all indices correctly indices = sorted(indices) for (i, index) in enumerate(indices): if not i == index.index_id: error("Unable to extract all indices.") # Get dimensions dims = [list(range(len(index.index_range))) for index in indices] return MultiIndex(dims) class MultiIndex: """ A MultiIndex represents a list of indices and holds the following data: rank - rank of multiindex dims - a list of dimensions indices - a list of all possible multiindex values """ def __init__(self, dims): "Create multiindex from given list of ranges" self.rank = len(dims) self.dims = [len(dim) for dim in dims] self.indices = build_indices(dims) return def __str__(self): "Return informal string representation (pretty-print)." return "rank = %d dims = %s indices = %s" % (self.rank, str(self.dims), str(self.indices)) ffc-1.6.0/ffc/tensor/referencetensor.py000066400000000000000000000051001255571034100200660ustar00rootroot00000000000000# Copyright (C) 2004-2009 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Garth N. Wells 2006 # Modified by Kristian B. Oelgaard, 2009. # # First added: 2004-11-03 # Last changed: 2011-11-28 # FFC modules. from ffc.log import debug # FFC tensor representation modules. 
from .monomialintegration import integrate from .monomialtransformation import MonomialIndex from .multiindex import create_multiindex class ReferenceTensor: """ This class represents the reference tensor for a monomial term of a multilinear form. """ def __init__(self, monomial, integral_type, facet0, facet1, quadrature_order, quadrature_rule, cellname, facet_cellname): "Create reference tensor for given monomial." # Compute reference tensor self.A0 = integrate(monomial, integral_type, facet0, facet1, quadrature_order, quadrature_rule, cellname, facet_cellname) # Extract indices primary_indices = monomial.extract_unique_indices(MonomialIndex.PRIMARY) secondary_indices = monomial.extract_unique_indices(MonomialIndex.SECONDARY) internal_indices = monomial.extract_unique_indices(MonomialIndex.INTERNAL) # Create multiindices self.primary_multi_index = create_multiindex(primary_indices) self.secondary_multi_index = create_multiindex(secondary_indices) self.internal_multi_index = create_multiindex(internal_indices) # Store monomial self.monomial = monomial debug("Primary multi index: " + str(self.primary_multi_index)) debug("Secondary multi index: " + str(self.secondary_multi_index)) debug("Internal multi index: " + str(self.internal_multi_index)) ffc-1.6.0/ffc/tensor/tensorgenerator.py000066400000000000000000000314551255571034100201320ustar00rootroot00000000000000"Code generator for tensor representation" # Copyright (C) 2004-2013 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. 
#
# You should have received a copy of the GNU Lesser General Public License
# along with FFC. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Kristian B. Oelgaard, 2009-2010
# Modified by Marie Rognes, 2007
# Modified by Garth N. Wells, 2009
# Modified by Mehdi Nikbakht, 2010
# Modified by Martin Alnaes, 2013
#
# First added: 2004-11-03
# Last changed: 2013-02-10

# FFC modules
from ffc.log import error
from ffc.cpp import format, remove_unused, count_ops

# FFC tensor representation modules
from ffc.tensor.monomialtransformation import MonomialIndex

from ffc.representationutils import initialize_integral_code

def generate_integral_code(ir, prefix, parameters):
    "Generate code for integral from intermediate representation."
    code = initialize_integral_code(ir, prefix, parameters)
    code["tabulate_tensor"] = _tabulate_tensor(ir, parameters)
    return code

def _tabulate_tensor(ir, parameters):
    "Generate code for tabulate_tensor."

    # Prefetch formats to speed up code generation
    comment = format["comment"]
    switch = format["switch"]

    # Set of used variables for Jacobian and geometry tensor
    j_set = set()
    g_set = set()

    # Extract data from intermediate representation
    AK = ir["AK"]
    integral_type = ir["integral_type"]
    tdim = ir["topological_dimension"]
    gdim = ir["geometric_dimension"]
    oriented = ir["needs_oriented"]
    num_facets = ir["num_facets"]

    # Check integral type and generate code
    if integral_type == "cell":

        # Generate code for one single tensor contraction
        t_code = _generate_tensor_contraction(AK, parameters, g_set)

        # Generate code for geometry tensors
        g_code = _generate_geometry_tensors(AK, j_set, g_set, tdim, gdim)

        # Generate code for basic geometric quantities
        j_code = ""
        j_code += format["compute_jacobian"](tdim, gdim)
        j_code += "\n"
        j_code += format["compute_jacobian_inverse"](tdim, gdim)
        if oriented:
            j_code += format["orientation"](tdim, gdim)
        j_code += "\n"
        j_code += format["scale factor snippet"]

    elif integral_type == "exterior_facet":

        # Generate code for num_facets tensor contractions
        cases = [None for i in range(num_facets)]
        for i in range(num_facets):
            cases[i] = _generate_tensor_contraction(AK[i], parameters, g_set)
        t_code = switch(format["facet"](None), cases)

        # Generate code for geometry tensors
        g_code = _generate_geometry_tensors(AK[0], j_set, g_set, tdim, gdim)

        # Generate code for Jacobian
        j_code = ""
        j_code += format["compute_jacobian"](tdim, gdim)
        j_code += "\n"
        j_code += format["compute_jacobian_inverse"](tdim, gdim)
        if oriented:
            j_code += format["orientation"](tdim, gdim)
        j_code += "\n"
        j_code += format["facet determinant"](tdim, gdim)

    elif integral_type == "interior_facet":

        # Generate code for num_facets x num_facets tensor contractions
        cases = [[None for j in range(num_facets)] for i in range(num_facets)]
        for i in range(num_facets):
            for j in range(num_facets):
                cases[i][j] = _generate_tensor_contraction(AK[i][j],
                                                           parameters, g_set)
        t_code = switch(format["facet"]("+"),
                        [switch(format["facet"]("-"), cases[i])
                         for i in range(len(cases))])

        # Generate code for geometry tensors
        g_code = _generate_geometry_tensors(AK[0][0], j_set, g_set, tdim, gdim)

        # Generate code for Jacobian (both restrictions)
        j_code = ""
        for _r in ["+", "-"]:
            j_code += format["compute_jacobian"](tdim, gdim, r=_r)
            j_code += "\n"
            j_code += format["compute_jacobian_inverse"](tdim, gdim, r=_r)
            j_code += "\n"
            if oriented:
                j_code += format["orientation"](tdim, gdim, r=_r)
        j_code += format["facet determinant"](tdim, gdim, r="+")
        j_code += "\n"

    else:
        error("Unhandled integral type: " + str(integral_type))

    # Remove unused declarations from Jacobian code
    j_code = remove_unused(j_code, j_set)

    # Compute total number of operations
    j_ops, g_ops, t_ops = [count_ops(c) for c in (j_code, g_code, t_code)]
    total_ops = j_ops + g_ops + t_ops

    # Add generated code
    lines = []
    lines.append(comment("Number of operations (multiply-add pairs) for Jacobian data: %d" % j_ops))
    lines.append(comment("Number of operations (multiply-add pairs) for geometry tensor: %d" % g_ops))
    lines.append(comment("Number of operations (multiply-add pairs) for tensor contraction: %d" % t_ops))
    lines.append(comment("Total number of operations (multiply-add pairs): %d" % total_ops))
    lines.append("")
    lines.append(j_code)
    lines.append("")
    lines.append(comment("Compute geometry tensor"))
    lines.append(g_code)
    lines.append("")
    lines.append(comment("Compute element tensor"))
    lines.append(t_code)

    return "\n".join(lines)

def _generate_tensor_contraction(terms, parameters, g_set):
    """
    Generate code for computation of tensor contraction
    """
    # Only the standard (full) contraction is implemented
    return _generate_tensor_contraction_standard(terms, parameters, g_set)

def _generate_tensor_contraction_standard(terms, parameters, g_set):
    """
    Generate code for computation of tensor contraction using full
    tensor contraction.
    """

    # Prefetch formats to speed up code generation
    iadd = format["iadd"]
    assign = format["assign"]
    element_tensor = format["element tensor"]
    geometry_tensor = format["geometry tensor"]
    zero = format["float"](0)
    inner_product = format["inner product"]

    # True if we should add to element tensor (not used)
    incremental = False

    # Get machine precision
    epsilon = parameters["epsilon"]

    # Get list of primary indices (should be the same so pick first)
    A0, GK, optimized_contraction = terms[0]
    primary_indices = A0.primary_multi_index.indices

    # Generate code for geometry tensor entries
    gk_tensor = []
    for (j, (A0, GK, optimized_contraction)) in enumerate(terms):
        gk_tensor_j = []
        for a in A0.secondary_multi_index.indices:
            gk_tensor_j.append((geometry_tensor(j, a), a))
        gk_tensor.append((gk_tensor_j, j))

    # Generate code for computing the element tensor
    lines = []
    for (k, i) in enumerate(primary_indices):
        name = element_tensor(k)
        coefficients = []
        entries = []
        for (gka, j) in gk_tensor:
            (A0, GK, optimized_contraction) = terms[j]
            for (gk, a) in gka:
                a0 = A0.A0[tuple(i + a)]

                # Skip small values
                if abs(a0) < epsilon:
                    continue

                # Compute value
                coefficients.append(a0)
                entries.append(gk)

                # Remember that gk has been used
                g_set.add(gk)

        # Compute inner product
        value = inner_product(coefficients, entries)

        # Handle special case
        value = value or zero

        # Add value
        if incremental:
            lines.append(iadd(name, value))
        else:
            lines.append(assign(name, value))

    return "\n".join(lines)

def _generate_geometry_tensors(terms, j_set, g_set, tdim, gdim):
    "Generate code for computation of geometry tensors."

    # Prefetch formats to speed up code generation
    format_add = format["addition"]
    format_geometry_tensor = format["geometry tensor"]
    format_scale_factor = format["scale factor"]
    format_declaration = format["const float declaration"]

    # Iterate over all terms
    lines = []
    offset = 0
    det_used = False
    for (i, term) in enumerate(terms):

        # Get secondary indices
        A0, GK, optimized_contraction = term
        secondary_indices = GK.secondary_multi_index.indices

        # Hack to keep old code generation based on factorization of GK
        # in case we want to reimplement factorization
        GKs = [GK]

        # Iterate over secondary indices
        for a in secondary_indices:

            # Skip code generation if term is not used
            if not format["geometry tensor"](i, a) in g_set:
                continue

            # Compute factorized values
            values = [_generate_entry(GK, a, offset + j, j_set, tdim, gdim)
                      for (j, GK) in enumerate(GKs)]

            # Sum factorized values
            name = format_geometry_tensor(i, a)
            value = format_add(values)

            # Multiply with determinant factor
            dets = GK.determinants
            value = _multiply_value_by_det(value, dets, len(values) > 1, j_set)
            det_used = True

            # Add code
            lines.append(format_declaration(name, value))

        # Add to offset
        offset += len(GKs)

    # Add scale factor
    if det_used:
        j_set.add(format_scale_factor)  # meg says: If all values vanish, det is not used.

    return "\n".join(lines)

def _generate_entry(GK, a, i, j_set, tdim, gdim):
    "Generate code for the value of a GK entry."
# Prefetch formats to speed up code generation grouping = format["grouping"] add = format["addition"] multiply = format["multiply"] # Compute product of factors outside sum factors = _extract_factors(GK, a, None, j_set, tdim, gdim, MonomialIndex.SECONDARY) # Compute sum of products of factors inside sum terms = [multiply(_extract_factors(GK, a, b, j_set, tdim, gdim, MonomialIndex.EXTERNAL)) for b in GK.external_multi_index.indices] # Compute product if factors: entry = multiply(factors + [grouping(add(terms))]) else: entry = add(terms) return entry def _multiply_value_by_det(value, dets, is_sum, j_set): "Generate code for multiplication of value by determinant(s)." # FIXME: MER: This is way complicated than it should be # Cell / exterior facets: d = [] if all([det.restriction == None for det in dets]): total_power = sum(det.power for det in dets) if not total_power == 0: J = format["det(J)"](None) d += [format["power"](J, total_power)] j_set.add(J) # Interior facets else: for det in dets: if not det.power == 0: J = format["det(J)"](det.restriction) d += [format["power"](J, det.power)] j_set.add(J) if value == "1.0": v = [] elif is_sum: v = [format["grouping"](value)] else: v = [value] return format["multiply"](d + [format["scale factor"]] + v) def _extract_factors(GK, a, b, j_set, tdim, gdim, index_type): "Extract factors of given index type in GK entry." 
# Prefetch formats to speed up code generation coefficient = format["coefficient"] transform = format["transform"] # List of factors factors = [] # Compute product of coefficients for c in GK.coefficients: if c.index.index_type == index_type: factors.append(coefficient(c.number, c.index(secondary=a))) # Compute product of transforms for t in GK.transforms: # Note non-trivial logic here if index_type == MonomialIndex.EXTERNAL: include_index = MonomialIndex.EXTERNAL in (t.index0.index_type, t.index1.index_type) else: include_index = not (MonomialIndex.EXTERNAL in (t.index0.index_type, t.index1.index_type)) # Add factor if include_index: # FIXME: Dimensions of J and K are transposed, what is the right thing to fix this hack? if t.transform_type == "J": #MonomialTransform.J: dim0, dim1 = gdim, tdim elif t.transform_type == "JINV": #MonomialTransform.JINV: dim0, dim1 = tdim, gdim else: error("Unknown transform type, fix this hack.") factors.append(transform(t.transform_type, t.index0(secondary=a, external=b), t.index1(secondary=a, external=b), dim0, dim1, t.restriction)) j_set.add(factors[-1]) return factors ffc-1.6.0/ffc/tensor/tensorreordering.py000066400000000000000000000076701255571034100203060ustar00rootroot00000000000000"Reordering of entries in reference tensor for interior facets." # Copyright (C) 2006-2009 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
# # First added: 2006-12-01 # Last changed: 2010-02-10 # Python modules import numpy # FFC tensor representation modules from .monomialtransformation import MonomialIndex from .multiindex import MultiIndex def reorder_entries(terms): """Reorder entries to compute the reference tensor for an interior facet from the the reduced reference tensor.""" # Iterate over terms for term in terms: # Compute restrictions corresponding to indices (restrictions, idims, adims) = __compute_restrictions(term) dims = idims + adims # Compute position where to insert position = [] for i in range(len(restrictions)): dim = dims[i] if restrictions[i] == "+": position = position + [slice(0, dim/2)] elif restrictions[i] == "-": position = position + [slice(dim/2, dim)] else: position = position + [slice(0, dim)] # Initialize empty reference tensor of double size in each dimension tensor = numpy.zeros(dims, dtype=numpy.float) # Insert reduced reference tensor into reference tensor (A0, GK, optimized_contraction) = term tensor[position] = A0.A0 A0.A0 = tensor # Reinitialize indices to new size A0.primary_multi_index = MultiIndex([list(range(idim)) for idim in idims]) A0.secondary_multi_index = MultiIndex([list(range(adim)) for adim in adims]) GK.secondary_multi_index = A0.secondary_multi_index def __compute_restrictions(term): """Compute restrictions corresponding to indices for given term. 
For indices at basis functions, we need to double the size of the reference tensor in the corresponding dimension, but for other dimensions corresponding to component indices and derivatives, the size remains the same.""" # Get dimensions for primary and secondary indices A0, GK, optimized_contraction = term idims = A0.primary_multi_index.dims adims = A0.secondary_multi_index.dims # Get basis functions for term arguments = A0.monomial.arguments # Create empty list of restrictions for indices restrictions = [None for i in range(len(idims) + len(adims))] # Extract restrictions corresponding to primary indices at basis functions for i in range(len(idims)): for v in arguments: if v.index.index_type == MonomialIndex.PRIMARY and v.index.index_id == i: restrictions[i] = v.restriction break # Extract restrictions corresponding to secondary indices at basis functions for i in range(len(adims)): for v in arguments: if v.index.index_type == MonomialIndex.SECONDARY and v.index.index_id == i: restrictions[len(idims) + i] = v.restriction break # Compute new dimensions new_idims = [i for i in idims] new_adims = [i for i in adims] for i in range(len(new_idims)): if not restrictions[i] == None: new_idims[i] = 2*new_idims[i] for i in range(len(new_adims)): if not restrictions[i + len(new_idims)] == None: new_adims[i] = 2*new_adims[i] return (restrictions, new_idims, new_adims) ffc-1.6.0/ffc/tensor/tensorrepresentation.py000066400000000000000000000112731255571034100212020ustar00rootroot00000000000000"""This module implements the representation of a multilinear form as a sum of tensor contractions. The following possible optimizations are currently not implemented but might be (re-)implemented in a future version of FFC 1. Factorization of common reference tensors """ # Copyright (C) 2007-2014 Anders Logg # # This file is part of FFC. 
# # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Kristian B. Oelgaard, 2010. # Modified by Martin Alnaes, 2013 # FFC modules from ffc.log import info, error from ffc.representationutils import initialize_integral_ir # FFC tensor representation modules from ffc.tensor.monomialextraction import extract_monomial_form from ffc.tensor.monomialtransformation import transform_monomial_form from ffc.tensor.referencetensor import ReferenceTensor from ffc.tensor.geometrytensor import GeometryTensor from ffc.tensor.tensorreordering import reorder_entries def compute_integral_ir(itg_data, form_data, form_id, element_numbers, parameters): "Compute intermediate represention of integral." 
info("Computing tensor representation") # Extract monomial representation integrands = [itg.integrand() for itg in itg_data.integrals] monomial_form = extract_monomial_form(integrands, form_data.function_replace_map) # Transform monomial form to reference element transform_monomial_form(monomial_form) # Get some integral properties integral_type = itg_data.integral_type quadrature_degree = itg_data.metadata["quadrature_degree"] quadrature_rule = itg_data.metadata["quadrature_rule"] # Get some cell properties cell = itg_data.domain.cell() cellname = cell.cellname() facet_cellname = cell.facet_cellname() num_facets = cell.num_facets() # Helper to simplify code below compute_terms = lambda i, j: _compute_terms(monomial_form, i, j, integral_type, quadrature_degree, quadrature_rule, cellname, facet_cellname) # Compute representation of cell tensor if integral_type == "cell": # Compute sum of tensor representations terms = compute_terms(None, None) elif integral_type == "exterior_facet": # Compute sum of tensor representations for each facet terms = [compute_terms(i, None) for i in range(num_facets)] elif integral_type == "interior_facet": # Compute sum of tensor representations for each facet-facet pair terms = [[compute_terms(i, j) for j in range(num_facets)] for i in range(num_facets)] for i in range(num_facets): for j in range(num_facets): reorder_entries(terms[i][j]) else: error("Unhandled domain type: " + str(integral_type)) # Initialize representation and store terms ir = initialize_integral_ir("tensor", itg_data, form_data, form_id) ir["AK"] = terms return ir def _compute_terms(monomial_form, facet0, facet1, integral_type, quadrature_degree, quadrature_rule, cellname, facet_cellname): "Compute list of tensor contraction terms for monomial form." 
# Compute terms terms = [] for integrand in monomial_form: # Iterate over monomials of integrand for monomial in integrand.monomials: # Compute reference tensor A0 = ReferenceTensor(monomial, integral_type, facet0, facet1, quadrature_degree, quadrature_rule, cellname, facet_cellname) # Compute geometry tensor GK = GeometryTensor(monomial) # Append term terms.append((A0, GK, None)) return terms ffc-1.6.0/ffc/timeelements.py000066400000000000000000000100441255571034100160610ustar00rootroot00000000000000# Copyright (C) 2012 Benjamin Kehlet # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Marie E. Rognes, 2012 # # First added: 2012-08-15 # Last changed: 2012-09-07 from FIAT import finite_element, polynomial_set, dual_set, functional, reference_element import ffc_time_ext.time_elements_ext as ext import numpy class TimeElementDualSet(dual_set.DualSet): """. """ def __init__(self, family, degree): assert(family == "Lobatto" or family == "Radau"), \ "Unknown time element '%s'" % family if family == "Lobatto" : assert (degree > 0), "Lobatto not defined for degree < 1!" 
else : assert(degree >= 0), "Degree must be >= 0" # ids is a map from mesh entity (numbers) to dof numbers ids = {} # dofs is a list of functionals dofs = [] # Only defined in 1D (on an inteval) cell = reference_element.UFCInterval() self.coords = (ext.compute_lobatto_points(degree) if family == "Lobatto" else ext.compute_radau_points(degree)) points = [(c,) for c in self.coords] # Create dofs from points dofs = [functional.PointEvaluation(cell, point) for point in points] # Create ids if family == "Lobatto": ids[0] = {0: [0], 1: [len(points)-1]} ids[1] = {0: list(range(1, len(points)-1))} elif family == "Radau": ids[0] = {0: [], 1: []} ids[1] = {0: list(range(len(points)))} # Treat all Radau points as internal else: error("Undefined family: %s" % family) # Initialize dual set dual_set.DualSet.__init__(self, dofs, cell, ids) class TimeElement(finite_element.FiniteElement): """.""" def __init__(self, family, degree): "Create time element with given (polynomial degree)." # Only defined in 1D (on an inteval) cell = reference_element.UFCInterval() # Initialize polynomial space of degree 'degree' polynomial_space = polynomial_set.ONPolynomialSet(cell, degree) # Create dual (degrees of freedom) dual = TimeElementDualSet(family, degree) # Initialize super class finite_element.FiniteElement.__init__(self, polynomial_space, dual, degree ) def compute_quadrature_weights(self) : """Compute the quadrature weights by solving a linear system of equations for exact integration of polynomials. We compute the integrals over [-1,1] of the Legendre polynomials of degree <= n - 1; These integrals are all zero, except for the integral of P0 which is 2. 
This requires that the n-point quadrature rule is exact at least for polynomials of degree n-1.""" n = len(self.dual.coords) # Special case n = 0 if n == 0 : weights[0] = 2.0; return weights # Initialize linear system A = ext.compute_legendre_coeffs(self.dual.coords) b = numpy.zeros(n) b[0] = 2.0 weights = numpy.linalg.solve(A, b) # Weights are computed on interval [-1, 1]. Scale to reference interval return weights/2.0 class LobattoElement(TimeElement): """.""" def __init__(self, degree): "Create Lobatto element with given (polynomial degree)." TimeElement.__init__(self, "Lobatto", degree) class RadauElement(TimeElement): """.""" def __init__(self, degree): "Create Radau element with given (polynomial degree)." TimeElement.__init__(self, "Radau", degree)ffc-1.6.0/ffc/uflacsrepr/000077500000000000000000000000001255571034100151635ustar00rootroot00000000000000ffc-1.6.0/ffc/uflacsrepr/__init__.py000066400000000000000000000002371255571034100172760ustar00rootroot00000000000000from .uflacsrepresentation import compute_integral_ir from .uflacsoptimization import optimize_integral_ir from .uflacsgenerator import generate_integral_code ffc-1.6.0/ffc/uflacsrepr/uflacsgenerator.py000066400000000000000000000030161255571034100207210ustar00rootroot00000000000000# Copyright (C) 2013-2014 Martin Alnaes # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
from ffc.log import info from ffc.representationutils import initialize_integral_code from uflacs.backends.ffc.generation import generate_tabulate_tensor_code def generate_integral_code(ir, prefix, parameters): "Generate code for integral from intermediate representation." info("Generating code from uflacs representation") # Generate generic ffc code snippets code = initialize_integral_code(ir, prefix, parameters) # Generate tabulate_tensor body using uflacs algorithms uflacs_code = generate_tabulate_tensor_code(ir, parameters) code["tabulate_tensor"] = uflacs_code["tabulate_tensor"] code["additional_includes_set"] = set() code["additional_includes_set"].update(ir.get("additional_includes_set",())) code["additional_includes_set"].update(uflacs_code["additional_includes_set"]) return code ffc-1.6.0/ffc/uflacsrepr/uflacsoptimization.py000066400000000000000000000017441255571034100214670ustar00rootroot00000000000000# Copyright (C) 2013-2014 Martin Alnaes # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . from ffc.log import info def optimize_integral_ir(ir, parameters): "Compute optimized intermediate representation of integral." info("Optimizing uflacs representation") # TODO: Implement optimization of ssa representation prior to code generation here. 
oir = ir return oir ffc-1.6.0/ffc/uflacsrepr/uflacsrepresentation.py000066400000000000000000000061671255571034100220070ustar00rootroot00000000000000# Copyright (C) 2013-2014 Martin Alnaes # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . from ffc.log import info, error, begin, end, debug_ir, ffc_assert, warning from ffc.fiatinterface import create_element from ffc.representationutils import initialize_integral_ir from ffc.quadrature.parameters import parse_optimise_parameters from ffc.quadrature.tabulate_basis import tabulate_basis from ffc.quadrature.quadraturerepresentation import sort_integrals from uflacs.backends.ffc.representation import compute_uflacs_integral_ir def compute_integral_ir(itg_data, form_data, form_id, element_numbers, # FIXME: Not used, what's this for? parameters): "Compute intermediate represention of integral." info("Computing uflacs representation") # Initialise representation ir = initialize_integral_ir("uflacs", itg_data, form_data, form_id) # Sort integrals into a dict with quadrature degree and rule as key sorted_integrals = sort_integrals(itg_data.integrals, itg_data.metadata["quadrature_degree"], itg_data.metadata["quadrature_rule"]) # TODO: Might want to create the uflacs ir first and then create the tables we need afterwards! 
# Tabulate quadrature points and basis function values in these points integrals_dict, psi_tables, quadrature_rules = \ tabulate_basis(sorted_integrals, form_data, itg_data) # Delegate to flacs to build its intermediate representation and add to ir uflacs_ir = compute_uflacs_integral_ir(psi_tables, ir["entitytype"], integrals_dict, form_data, parameters) # Store uflacs generated part separately ir["uflacs"] = uflacs_ir # Create and save the optisation parameters # TODO: Define uflacs specific optimization parameters instead #ir["optimise_parameters"] = parse_optimise_parameters(parameters) # Save tables for quadrature weights and points ir["quadrature_rules"] = quadrature_rules # Create dimensions of primary indices, needed to reset the argument 'A' # given to tabulate_tensor() by the assembler. ir["prim_idims"] = [create_element(ufl_element).space_dimension() for ufl_element in form_data.argument_elements] # Added for uflacs, not sure if this is the best way to get this: ir["coeff_idims"] = [create_element(ufl_element).space_dimension() for ufl_element in form_data.coefficient_elements] return ir ffc-1.6.0/ffc/utils.py000066400000000000000000000037171255571034100145370ustar00rootroot00000000000000# Copyright (C) 2005-2014 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Kristian B. Oelgaard, 2009 # Modified by Martin Alnaes 2014 # Python modules. 
import operator import functools import itertools # FFC modules. from .log import error from ufl.utils.sequences import product def all_equal(sequence): "Check that all items in list are equal." return sequence[:-1] == sequence[1:] def pick_first(sequence): "Check that all values are equal and return the value." if not all_equal(sequence): error("Values differ: " + str(sequence)) return sequence[0] def listcopy(sequence): """Create a copy of the list, calling the copy constructor on each object in the list (problems when using copy.deepcopy).""" if not sequence: return [] else: return [object.__class__(object) for object in sequence] def compute_permutations(k, n, skip = []): """Compute all permutations of k elements from (0, n) in rising order. Any elements that are contained in the list skip are not included.""" if k == 1: return [(i,) for i in range(n) if not i in skip] pp = compute_permutations(k - 1, n, skip) permutations = [] for i in range(n): if i in skip: continue for p in pp: if i < p[0]: permutations += [(i, ) + p] return permutations ffc-1.6.0/ffc/wrappers.py000066400000000000000000000100551255571034100152330ustar00rootroot00000000000000# Copyright (C) 2010-2014 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
# Python modules from itertools import chain # FFC modules from ffc.log import begin, end, info, error from ffc.utils import all_equal from ffc.cpp import format from ffc.backends.dolfin.wrappers import generate_dolfin_code from ffc.backends.dolfin.capsules import UFCElementNames, UFCFormNames __all__ = ["generate_wrapper_code"] # FIXME: More clean-ups needed here. def generate_wrapper_code(analysis, prefix, object_names, parameters): "Generate code for additional wrappers." # Skip if wrappers not requested if not parameters["format"] == "dolfin": return None # Return dolfin wrapper return _generate_dolfin_wrapper(analysis, prefix, object_names, parameters) def _generate_dolfin_wrapper(analysis, prefix, object_names, parameters): begin("Compiler stage 4.1: Generating additional wrapper code") # Encapsulate data (capsules, common_space) = _encapsulate(prefix, object_names, analysis, parameters) # Generate code info("Generating wrapper code for DOLFIN") code = generate_dolfin_code(prefix, "", capsules, common_space, error_control=parameters["error_control"]) code += "\n\n" end() return code def _encapsulate(prefix, object_names, analysis, parameters): # Extract data from analysis form_datas, elements, element_map = analysis num_form_datas = len(form_datas) common_space = False # Special case: single element if num_form_datas == 0: capsules = _encapsule_element(prefix, elements) # Special case: with error control elif (parameters["error_control"] and num_form_datas == 11): capsules = [_encapsule_form(prefix, object_names, form_data, i, element_map) for (i, form_data) in enumerate(form_datas[:num_form_datas-1])] capsules += [_encapsule_form(prefix, object_names, form_datas[-1], num_form_datas-1, element_map, "GoalFunctional")] # Otherwise: generate standard capsules for each form else: capsules = [_encapsule_form(prefix, object_names, form_data, i, element_map) for (i, form_data) in enumerate(form_datas)] # Check if all elements are equal elements = [] for form_data in 
form_datas: elements += form_data.elements[:form_data.rank] common_space = all_equal(elements) return (capsules, common_space) def _encapsule_form(prefix, object_names, form_data, i, element_map, superclassname=None): element_numbers = [element_map[e] for e in form_data.elements] if superclassname is None: superclassname = "Form" form_names = UFCFormNames( object_names.get(id(form_data.original_form), "%d" % i), [object_names.get(id(obj), "w%d" % j) for j, obj in enumerate(form_data.reduced_coefficients)], format["classname form"](prefix, i), [format["classname finite_element"](prefix, j) for j in element_numbers], [format["classname dofmap"](prefix, j) for j in element_numbers], superclassname) return form_names def _encapsule_element(prefix, elements): element_number = len(elements) - 1 args = ("0", [format["classname finite_element"](prefix, element_number)], [format["classname dofmap"](prefix, element_number)]) return UFCElementNames(*args) ffc-1.6.0/ffc_time_ext/000077500000000000000000000000001255571034100147135ustar00rootroot00000000000000ffc-1.6.0/ffc_time_ext/Legendre.cpp000066400000000000000000000061151255571034100171470ustar00rootroot00000000000000// Copyright (C) 2003-2008 Anders Logg // // This file is part of FFC. // // FFC is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // FFC is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with FFC. If not, see . 
// // Modified by Benjamin Kehlet 2011-2012 // // First added: 2012-08-20 // Last changed: 2012-09-05 #include "Legendre.h" #include //----------------------------------------------------------------------------- Legendre::Legendre(unsigned int n) : n(n), cache_x(0.0), cache(n + 1) { cache[0] = 1.0; //constant value // eval to initialize cache eval(n, -1.0); } //----------------------------------------------------------------------------- double Legendre::operator() (double x) { return eval(n, x); } //----------------------------------------------------------------------------- double Legendre::ddx(double x) { return ddx(n, x); } //----------------------------------------------------------------------------- double Legendre::d2dx(double x) { return d2dx(n, x); } //----------------------------------------------------------------------------- double Legendre::eval(unsigned int nn, double x) { //recursive formula, BETA page 254 //return ( (2.0*nn-1.0)*x*eval(nn-1, x) - (nn-1.0)*eval(nn-2, x) ) / nn; //The special cases if (n == 0) return 1.0; else if (n == 1) return x; //check cache if (x != cache_x) { cache[1] = x; for (unsigned int i = 2; i <= n; ++i) { double ii = i; cache[i] = ( (2.0*ii-1.0)*x*cache[i-1] - (ii-1.0)*cache[i-2] ) / ii; } cache_x = x; } return cache[nn]; } //----------------------------------------------------------------------------- double Legendre::ddx(unsigned int n, double x) { // Special cases if (n == 0) return 0.0; else if (n == 1) return 1.0; // Avoid division by zero if (std::abs(x - 1.0) < EPSILON) x -= 2.0*EPSILON; if (std::abs(x + 1.0) < EPSILON) x += 2.0*EPSILON; // Formula, BETA page 254 const double nn = n; return nn * (x*eval(n, x) - eval(n-1, x)) / (x*x - 1.0); } //----------------------------------------------------------------------------- double Legendre::d2dx(unsigned int, double x) { // Special case n = 0 if (n == 0) return 0.0; // Special case n = 1 if (n == 1) return 0.0; // Avoid division by zero if (std::abs(x - 1.0) < 
EPSILON) x -= 2.0*EPSILON; if (std::abs(x + 1.0) < EPSILON) x += 2.0*EPSILON; // Formula, BETA page 254 const double nn = double(n); return (2.0*x*ddx(n, x) - nn*(nn+1)*eval(n, x)) / (1.0-x*x); } //----------------------------------------------------------------------------- ffc-1.6.0/ffc_time_ext/Legendre.h000066400000000000000000000032131255571034100166100ustar00rootroot00000000000000// Copyright (C) 2003-2009 Anders Logg // // This file is part of FFC. // // DOLFIN is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // DOLFIN is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with DOLFIN. If not, see . // // First added: 2012-08-20 // Last changed: 2012-09-05 #ifndef __LEGENDRE_H #define __LEGENDRE_H /// Legendre polynomial of given degree n on the interval [-1,1]. /// /// P0(x) = 1 /// P1(x) = x /// P2(x) = (3x^2 - 1) / 2 /// ... /// /// The function values and derivatives are computed using /// three-term recurrence formulas. 
#include #define EPSILON 10e-15 class Legendre { public: Legendre(unsigned int n); /// Evaluation at given point double operator() (double x); /// Evaluation of derivative at given point double ddx(double x); /// Evaluation of second derivative at given point double d2dx(double x); /// Evaluation of arbitrary order, nn <= n (useful ie in RadauQuadrature) double eval(unsigned int nn, double x); double ddx(unsigned int n, double x); double d2dx(unsigned int n, double x); private: const unsigned int n; double cache_x; std::vector cache; }; #endif ffc-1.6.0/ffc_time_ext/LobattoQuadrature.cpp000066400000000000000000000043141255571034100210630ustar00rootroot00000000000000// Copyright (C) 2003-2006 Anders Logg // // This file is part of FFC. // // FFC is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // FFC is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with FFC. If not, see . 
// // First added: 2012-08-20 // Last changed: 2012-09-05 #include "LobattoQuadrature.h" #include "Legendre.h" #include //----------------------------------------------------------------------------- LobattoQuadrature::LobattoQuadrature(unsigned int n) : points(n) { // FIXME: Do proper arguement checking // if (n < 2) // error("Lobatto quadrature requires at least 2 points."); // init(); // if (!check(2*n - 3)) // error("Lobatto quadrature not ok, check failed."); // Compute the Lobatto quadrature points in [-1,1] as the endpoints // and the zeroes of the derivatives of the Legendre polynomials // using Newton's method //const unsigned int n = points.size(); // Special case n = 1 (should not be used) if (n == 1) { points[0] = 0.0; return; } // Special case n = 2 if (n == 2) { points[0] = -1.0; points[1] = 1.0; return; } Legendre p(n - 1); double x, dx; // Set the first and last nodal points which are 0 and 1 points[0] = -1.0; points[n - 1] = 1.0; // Compute the rest of the nodes by Newton's method for (unsigned int i = 1; i <= ((n-1)/2); i++) { // Initial guess x = cos(3.1415926*double(i)/double(n - 1)); // Newton's method do { dx = -p.ddx(x)/p.d2dx(x); x = x + dx; } while (std::abs(dx) > EPSILON); // Save the value using the symmetry of the points points[i] = -x; points[n - 1 - i] = x; } // Fix the middle node if ((n % 2) != 0) points[n/2] = 0.0; } ffc-1.6.0/ffc_time_ext/LobattoQuadrature.h000066400000000000000000000027621255571034100205350ustar00rootroot00000000000000// Copyright (C) 2003-2009 Anders Logg // // This file is part of FFC. // // FFC is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. 
// // FFC is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with FFC. If not, see . // // First added: 2012-08-20 // Last changed: 2012-09-05 #ifndef __LOBATTO_QUADRATURE_H #define __LOBATTO_QUADRATURE_H /// Lobatto (Gauss-Lobatto) quadrature on the interval [-1,1]. /// The n quadrature points are given by the end-points -1 and 1, /// and the zeros of P{n-1}'(x), where P{n-1}(x) is the (n-1):th /// Legendre polynomial. /// /// The quadrature points are computed using Newton's method, and /// the quadrature weights are computed by solving a linear system /// determined by the condition that Lobatto quadrature with n points /// should be exact for polynomials of degree 2n-3. #include class LobattoQuadrature { public: /// Create Lobatto quadrature with n points LobattoQuadrature(unsigned int n); //~LobattoQuadrature(); std::vector points; }; #endif ffc-1.6.0/ffc_time_ext/RadauQuadrature.cpp000066400000000000000000000044201255571034100205110ustar00rootroot00000000000000// Copyright (C) 2003-2006 Anders Logg // // This file is part of FFC. // // FFC is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // FFC is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with FFC. If not, see . 
// // First added: 2012-08-20 // Last changed: 2012-09-05 #include "RadauQuadrature.h" #include "Legendre.h" #include //----------------------------------------------------------------------------- RadauQuadrature::RadauQuadrature(unsigned int n) : points(n+1) { // Compute the Radau quadrature points in [-1,1] as -1 and the zeros // of ( Pn-1(x) + Pn(x) ) / (1+x) where Pn is the n:th Legendre // polynomial. Computation is a little different than for Gauss and // Lobatto quadrature, since we don't know of any good initial // approximation for the Newton iterations. // Special case n = 1 if (n == 1) { points[0] = -1.0; return; } Legendre p(n); double x, dx, step, sign; // Set size of stepping for seeking starting points step = 1.0/(double(n - 1)*15.0); // Set the first nodal point which is -1 points[0] = -1.0; // Start at -1 + step x = -1.0 + step; // Set the sign at -1 + epsilon sign = ((p.eval(n - 1, x) + p(x)) > 0 ? 1.0 : -1.0); // Compute the rest of the nodes by Newton's method for (unsigned int i = 1; i < n; i++) { // Step to a sign change while ((p.eval(n - 1, x) + p(x))*sign > 0.0) x += step; // Newton's method do { dx = -(p.eval(n-1, x) + p(x))/(p.ddx(n - 1, x) + p.ddx(x)); x = x + dx; } while (std::abs(dx) > EPSILON); // Set the node value points[i] = x; // Fix step so that it's not too large if (step > (points[i] - points[i-1])/10.0) step = (points[i] - points[i-1])/10.0; // Step forward sign = -sign; x += step; } } ffc-1.6.0/ffc_time_ext/RadauQuadrature.h000066400000000000000000000026751255571034100201700ustar00rootroot00000000000000// Copyright (C) 2003-2009 Anders Logg // // This file is part of FFC. // // FFC is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. 
// // FFC is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with FFC. If not, see . // // First added: 2012-08-20 // Last changed: 2012-09-05 #ifndef __RADAU_QUADRATURE_H #define __RADAU_QUADRATURE_H /// Radau (Gauss-Radau) quadrature on the interval [-1,1]. /// The n quadrature points are given by the zeros of /// /// ( Pn-1(x) + Pn(x) ) / (1+x) /// /// where Pn is the n:th Legendre polynomial. /// /// The quadrature points are computed using Newton's method, and /// the quadrature weights are computed by solving a linear system /// determined by the condition that Radau quadrature with n points /// should be exact for polynomials of degree 2n-2. #include class RadauQuadrature { public: /// Create Radau quadrature with n points RadauQuadrature(unsigned int n); std::vector points; }; #endif ffc-1.6.0/ffc_time_ext/time_elements.cpp000066400000000000000000000031611255571034100202520ustar00rootroot00000000000000// Copyright (C) 2012 Benjamin Kehlet // // This file is part of FFC. // // FFC is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // FFC is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with FFC. If not, see . 
// // First added: 2012-08-20 // Last changed: 2012-09-05 #include #include "LobattoQuadrature.h" #include "RadauQuadrature.h" #include "Legendre.h" void compute_lobatto_points(double* points, const unsigned int degree) { // Compute the nodal basis LobattoQuadrature lobatto(degree + 1); for (unsigned int i = 0; i < degree +1; i++) points[i] = (lobatto.points[i] + 1.0) / 2.0; } void compute_radau_points (double* points, const unsigned int degree) { RadauQuadrature radau(degree+1); for (unsigned int i = 0; i < degree+1; i++) points[degree-i] = (-radau.points[i] + 1.0) / 2.0; } void compute_legendre_coeffs(double* coeffs, const double *points, const unsigned int num_points, const unsigned int degree) { for (unsigned int i = 0; i < degree; i++) { Legendre p(i); for (unsigned int j = 0; j < num_points; j++) { coeffs[i*num_points + j] = p(points[j]*2.0 -1.0); } } } ffc-1.6.0/ffc_time_ext/time_elements.h000066400000000000000000000020611255571034100177150ustar00rootroot00000000000000// Copyright (C) 2012 Benjamin Kehlet // // This file is part of FFC. // // FFC is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // FFC is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with FFC. If not, see . 
// // First added: 2012-08-20 // Last changed: 2012-09-05 void compute_lobatto_points(double* points, const unsigned int degree); void compute_radau_points (double* points, const unsigned int degree); void compute_legendre_coeffs(double* coeffs, const double *points, const unsigned int num_points, const unsigned int degree); ffc-1.6.0/ffc_time_ext/time_elements_interface.cpp000066400000000000000000000101341255571034100222700ustar00rootroot00000000000000// Copyright (C) 2012 Benjamin Kehlet // // This file is part of FFC. // // FFC is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // FFC is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with FFC. If not, see . 
// // First added: 2012-08-20 // Last changed: 2013-07-11 #include #include #include "time_elements.h" static PyObject *compute_lobatto_interface(PyObject *dummy, PyObject *args) { int degree; /* parse argument tuple */ if (!PyArg_ParseTuple(args, "i", °ree)) { return NULL; /* PyArg_ParseTuple has raised an exception */ } npy_intp n = degree+1; PyArrayObject *py_array_points = (PyArrayObject*) PyArray_SimpleNew(1, &n, NPY_DOUBLE); double *points = (double*) PyArray_DATA(py_array_points); compute_lobatto_points(points, degree); // return values return (PyObject*) py_array_points; } static PyObject *compute_radau_interface(PyObject *dummy, PyObject *args) { int degree; /* parse argument tuple */ if (!PyArg_ParseTuple(args, "i", °ree)) { return NULL; /* PyArg_ParseTuple has raised an exception */ } npy_intp n = degree+1; PyArrayObject *py_array_points = (PyArrayObject*) PyArray_SimpleNew(1, &n, NPY_DOUBLE); double *points = (double*) PyArray_DATA(py_array_points); compute_radau_points(points, degree); // return values return (PyObject*) py_array_points; } static PyObject *compute_legendre_coeffs_interface(PyObject *dummy, PyObject *args) { PyArrayObject *points_array; /* parse argument tuple */ if (!PyArg_ParseTuple(args, "O!", &PyArray_Type, &points_array)) { return NULL; /* PyArg_ParseTuple has raised an exception */ } const npy_intp num_points = PyArray_DIMS(points_array)[0]; npy_intp dims[2] = { num_points, num_points }; PyArrayObject *py_array_coeffs = (PyArrayObject*) PyArray_SimpleNew(2, dims, NPY_DOUBLE); double *coeffs = (double*) PyArray_DATA(py_array_coeffs); compute_legendre_coeffs(coeffs, (double*) PyArray_DATA(points_array), num_points, num_points); // return values return (PyObject*) py_array_coeffs; } static char compute_lobatto_doc[] = \ "Doc string for compute_lobatto_points"; static char compute_radau_doc[] = \ "Doc string for compute_radau_points"; static char compute_legendre_coeffs_doc[] = \ "Doc string for compute_legendre_coeffs"; static 
PyMethodDef time_elements_ext_methods[] = { {"compute_lobatto_points", compute_lobatto_interface, METH_VARARGS, compute_lobatto_doc}, {"compute_radau_points", compute_radau_interface, METH_VARARGS, compute_radau_doc}, {"compute_legendre_coeffs", compute_legendre_coeffs_interface, METH_VARARGS, compute_legendre_coeffs_doc}, {NULL, NULL} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "time_elements_ext", /* m_name */ "This is a module..", /* m_doc */ -1, /* m_size */ time_elements_ext_methods, /* m_methods */ NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL, /* m_free */ }; PyMODINIT_FUNC PyInit_time_elements_ext(void) { PyObject *m; m = PyModule_Create(&moduledef); import_array(); if(m==NULL) return NULL; return m; } #else PyMODINIT_FUNC inittime_elements_ext(void) { (void)Py_InitModule("time_elements_ext", time_elements_ext_methods); import_array(); } #endif ffc-1.6.0/release.conf000066400000000000000000000021271255571034100145500ustar00rootroot00000000000000# Configuration file for fenics-release PACKAGE="ffc" BRANCH="master" FILES="ChangeLog \ README.rst \ setup.py \ ffc/__init__.py \ ufc/ufc.h \ ufc_benchmark/ufc_benchmark.cpp \ ufc_benchmark/ufc_benchmark.h \ ufc_benchmark/ufc_data.h \ ufc_benchmark/ufc_reference_cell.h \ ffc/backends/ufc/__init__.py \ ffc/backends/ufc/dofmap.py \ ffc/backends/ufc/finite_element.py \ ffc/backends/ufc/form.py \ ffc/backends/ufc/function.py \ ffc/backends/ufc/integrals.py" pre-release() { # Update regression tests echo "Installing FFC locally" python setup.py install --prefix=$PWD/local PYVER=$(python -c 'import sys; print(".".join(map(str, sys.version_info[:2])))') export PYTHONPATH=$PWD/local/lib/python$PYVER/site-packages:$PYTHONPATH export PATH=$PWD/local/bin:$PATH echo "Running regression tests" cd test/regression python test.py --generate-only less error.log echo "Only version numbers should differ, press return to continue" read ./scripts/upload cd - } 
ffc-1.6.0/scripts/000077500000000000000000000000001255571034100137465ustar00rootroot00000000000000ffc-1.6.0/scripts/ffc000077500000000000000000000160661255571034100144430ustar00rootroot00000000000000#!/usr/bin/env python # This script is the command-line interface to FFC. It parses # command-line arguments and wraps the given form file code in a # Python module which is then executed. # Copyright (C) 2004-2014 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Johan Jansson, 2005. # Modified by Ola Skavhaug, 2006. # Modified by Dag Lindbo, 2008. # Modified by Kristian B. Oelgaard 2010. # Python modules. import sys import getopt import cProfile import re import string import os from os import curdir from os import path from os import getcwd # UFL modules. from ufl.log import UFLException from ufl.algorithms import load_ufl_file import ufl # FFC modules. from ffc.log import info from ffc.log import set_level from ffc.log import DEBUG from ffc.log import ERROR from ffc.parameters import default_parameters from ffc import __version__ as FFC_VERSION from ffc.compiler import compile_form, compile_element from ffc.errorcontrol import compile_with_error_control def error(msg): "Print error message (cannot use log system at top level)." print("\n".join(["*** FFC: " + line for line in msg.split("\n")])) def info_version(): "Print version number." 
info("""\ This is FFC, the FEniCS Form Compiler, version {0}. For further information, visit http://www.fenics.org/ffc/. """.format(FFC_VERSION)) def info_usage(): "Print usage information." info_version() info("""Usage: ffc [OPTION]... input.form For information about the FFC command-line interface, refer to the FFC man page which may invoked by 'man ffc' (if installed). """) def main(argv): "Main function." # Append current directory to path, such that the *_debug module created by # ufl_load_file can be found when FFC compiles a form which is not in the # PYHTONPATH sys.path.append(getcwd()) # Get parameters and set log level (such that info_usage() will work) parameters = default_parameters() set_level(parameters["log_level"]) # Get command-line arguments try: opts, args = getopt.getopt(argv, \ "hVvsl:r:f:Oo:q:ep", \ ["help", "version", "verbose", "silent", "language=", "representation=", "optimize", "output-directory=", "quadrature-rule=", "error-control", "profile"]) except getopt.GetoptError: info_usage() error("Illegal command-line arguments.") return 1 # Check for --help if ("-h", "") in opts or ("--help", "") in opts: info_usage() return 0 # Check for --version if ("-V", "") in opts or ("--version", "") in opts: info_version() return 0 # Check that we get at least one file if len(args) == 0: error("Missing file.") return 1 # Parse command-line parameters for opt, arg in opts: if opt in ("-v", "--verbose"): parameters["log_level"] = DEBUG elif opt in ("-s", "--silent"): parameters["log_level"] = ERROR elif opt in ("-l", "--language"): parameters["format"] = arg elif opt in ("-r", "--representation"): parameters["representation"] = arg elif opt in ("-q", "--quadrature-rule"): parameters["quadrature_rule"] = arg elif opt == "-f": if len(arg.split("=")) == 2: (key, value) = arg.split("=") parameters[key] = value elif len(arg.split("==")) == 1: key = arg.split("=")[0] parameters[arg] = True else: info_usage() return 1 elif opt in ("-O", "--optimize"): 
parameters["optimize"] = True elif opt in ("-o", "--output-directory"): parameters["output_dir"] = arg elif opt in ("-e", "--error-control"): parameters["error_control"] = True elif opt in ("-p", "--profile"): parameters["profile"] = True # Set log_level again in case -d or -s was used on the command line set_level(parameters["log_level"]) # Set UFL precision ufl.constantvalue.precision = int(parameters["precision"]) # Print a nice message info_version() # Call parser and compiler for each file for filename in args: # Get filename prefix and suffix prefix, suffix = os.path.splitext(os.path.basename(filename)) suffix = suffix.replace(os.path.extsep, "") # Remove weird characters (file system allows more than the C preprocessor) prefix = re.subn("[^{}]".format(string.ascii_letters + string.digits + "_"), "!", prefix)[0] prefix = re.subn("!+", "_", prefix)[0] # Turn on profiling if parameters.get("profile"): pr = cProfile.Profile() pr.enable() # Check file suffix and load ufl file if suffix == "ufl": ufd = load_ufl_file(filename) elif suffix == "form": error("Old style .form files are no longer supported. 
Use form2ufl to convert to UFL format.") return 1 else: error("Expecting a UFL form file (.ufl).") return 1 # Do additional stuff if in error-control mode if parameters["error_control"]: return compile_with_error_control(ufd.forms, ufd.object_names, ufd.reserved_objects, prefix, parameters) # Catch exceptions only when not in debug mode if parameters["log_level"] <= DEBUG: if len(ufd.forms) > 0: compile_form(ufd.forms, ufd.object_names, prefix, parameters) else: compile_element(ufd.elements, prefix, parameters) else: try: if len(ufd.forms) > 0: compile_form(ufd.forms, ufd.object_names, prefix, parameters) else: compile_element(ufd.elements, prefix, parameters) except Exception as exception: info("") error(str(exception)) error("To get more information about this error, rerun FFC with --verbose.") return 1 # Turn off profiling and write status to file if parameters.get("profile"): pr.disable() pfn = "ffc_{0}.profile".format(prefix) pr.dump_stats(pfn) info("Wrote profiling info to file {0}".format(pfn)) #pr.print_stats() return 0 if __name__ == "__main__": sys.exit(main(sys.argv[1:])) ffc-1.6.0/scripts/makedist000077500000000000000000000055001255571034100154750ustar00rootroot00000000000000#!/bin/bash # # Copyright (C) 2004-2008 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
# # This script creates a new release of FFC # Make sure FFC is installed (so we may run unit tests) #echo '--- Uninstalling FFC' #sudo python setup.py install # Make sure we have the current version echo '--- Synchronizing repository' sleep 1 hg commit hg pull ssh://ffc@fenics.org/hg/ffc hg merge hg commit hg update hg push ssh://ffc@fenics.org/hg/ffc # Update version numbers echo '--- Update version number in ChangeLog' sleep 1 emacs -nw ChangeLog echo '--- Update version number in constants.py' sleep 1 emacs -nw ffc/common/constants.py echo '--- Update version number in setup.py' sleep 1 emacs -nw setup.py # Install latest version echo "Running commands for installing FFC locally on my machine. Sorry about that." echo "We need to figure out a better way to organize the makedist script. /Anders" fenics-install fenics-dev # Get the version number VERSION=`grep 'FFC_VERSION' ffc/common/constants.py | cut -d'"' -f2` echo "--- Version number is $VERSION" # Run tests echo '--- Running tests' cd test python test.py echo '--- Only version numbers should differ, press return to continue' read cd regression ./update-references cd ../.. 
# Run benchmark problem echo '--- Running benchmark problem' cd bench echo "FFC version $VERSION" >> bench.log date >> bench.log echo "" >> bench.log ./bench >> bench.log cd ../ # Tag repository hg tag $VERSION # Commit changes to hg echo '--- Pushing changes to parent repository' sleep 1 hg commit hg push ssh://ffc@fenics.org/hg/ffc # Create archive hg archive -t tgz ffc-$VERSION.tar.gz # Copy files to web page echo '--- Copying files to web server' scp ffc-$VERSION.tar.gz fenics@fenics.org:www.fenics.org/pub/software/ffc/v0.7 scp ChangeLog fenics@fenics.org:www.fenics.org/pub/software/ffc/ scp TODO fenics@fenics.org:www.fenics.org/pub/software/ffc/ # Notify ffc-dev of the new version echo '--- Notifying mailing list' SUBJECT="Version "$VERSION" of FFC released" cat ChangeLog | mail -s "$SUBJECT" ffc-dev@fenics.org # Edit web pages echo '--- Edit web pages' ssh -t fenics@fenics.org '/home/fenics/local/bin/news' firefox http://www.fenics.org/wiki/Download # Notify pypi python setup.py register ffc-1.6.0/scripts/makedoc000077500000000000000000000030411255571034100152750ustar00rootroot00000000000000# Copyright (C) 2011 Marie E. Rognes # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2011-11-16 # Last changed: 2011-11-16 # # This is a utility script for generating .rst and .html # documentation for FFC. 
# # Run from the top level FFC directory: # # ./scripts/makedoc # echo "" echo "--- Generating FFC html documentation" echo "" SPHINX_DIR=./doc/sphinx SPHINX_SCRIPT_DIR=$SPHINX_DIR/scripts SPHINX_SOURCE_DIR=$SPHINX_DIR/source # Generate .rst files $SPHINX_SCRIPT_DIR/generate_modules.py ffc --dest-dir=$SPHINX_SOURCE_DIR --suffix=rst --force echo "" echo "--- reSTructured text files generated in doc/sphinx/source/" echo "" # Generate index (and add some labels) VERSION=`grep '__version__' ffc/__init__.py | cut -d'"' -f2` $SPHINX_SCRIPT_DIR/generate_index.py $SPHINX_SOURCE_DIR $VERSION # Run sphinx make html cd $SPHINX_DIR make clean make html echo "" echo "--- HTML files generated in $SPHINX_DIR/build/html" echo "" ffc-1.6.0/setup.py000077500000000000000000000303141255571034100137750ustar00rootroot00000000000000#!/usr/bin/env python import os, sys, platform, re, subprocess, string, numpy, tempfile, shutil from distutils import sysconfig, spawn from distutils.core import setup, Extension from distutils.command import build_ext from distutils.command.build import build from distutils.ccompiler import new_compiler from distutils.version import LooseVersion if sys.version_info < (2, 7): print("Python 2.7 or higher required, please upgrade.") sys.exit(1) VERSION = re.findall('__version__ = "(.*)"', open('ffc/__init__.py', 'r').read())[0] SCRIPTS = [os.path.join("scripts", "ffc")] AUTHORS = """\ Anders Logg, Kristian Oelgaard, Marie Rognes, Garth N. Wells, Martin Sandve Alnaes, Hans Petter Langtangen, Kent-Andre Mardal, Ola Skavhaug, et al. 
""" CLASSIFIERS = """\ Development Status :: 5 - Production/Stable Intended Audience :: Developers Intended Audience :: Science/Research License :: OSI Approved :: GNU General Public License v2 (GPLv2) License :: Public Domain Operating System :: MacOS :: MacOS X Operating System :: Microsoft :: Windows Operating System :: POSIX Operating System :: POSIX :: Linux Programming Language :: C++ Programming Language :: Python Topic :: Scientific/Engineering :: Mathematics Topic :: Software Development :: Libraries """ def get_installation_prefix(): "Get installation prefix" try: prefix = [item for item in sys.argv[1:] \ if "--prefix=" in item][0].split("=")[1] except: try: prefix = sys.argv[sys.argv.index("--prefix")+1] except: if platform.system() == "Windows": prefix = sys.prefix else: prefix = "/usr/local" return os.path.abspath(os.path.expanduser(prefix)) def get_swig_executable(): "Get SWIG executable" # Find SWIG executable swig_executable = None swig_minimum_version = "3.0.3" for executable in ["swig", "swig3.0"]: swig_executable = spawn.find_executable(executable) if swig_executable is not None: # Check that SWIG version is ok output = subprocess.check_output([swig_executable, "-version"]).decode('utf-8') swig_version = re.findall(r"SWIG Version ([0-9.]+)", output)[0] if LooseVersion(swig_version) >= LooseVersion(swig_minimum_version): break swig_executable = None if swig_executable is None: raise OSError("Unable to find SWIG version %s or higher." 
% swig_minimum_version) print("Found SWIG: %s (version %s)" % (swig_executable, swig_version)) return swig_executable def create_windows_batch_files(scripts): """Create Windows batch files, to get around problem that we cannot run Python scripts in the prompt without the .py extension.""" batch_files = [] for script in scripts: batch_file = script + ".bat" f = open(batch_file, "w") f.write("python \"%%~dp0\%s\" %%*\n" % os.path.split(script)[1]) f.close() batch_files.append(batch_file) scripts.extend(batch_files) return scripts def write_config_file(infile, outfile, variables={}): "Write config file based on template" class AtTemplate(string.Template): delimiter = "@" s = AtTemplate(open(infile, "r").read()) s = s.substitute(**variables) a = open(outfile, "w") try: a.write(s) finally: a.close() def find_python_library(): "Return the full path to the Python library (empty string if not found)" pyver = sysconfig.get_python_version() libpython_names = [ "python%s" % pyver.replace(".", ""), "python%smu" % pyver, "python%sm" % pyver, "python%su" % pyver, "python%s" % pyver, ] dirs = [ "%s/lib" % os.environ.get("PYTHON_DIR", ""), "%s" % sysconfig.get_config_vars().get("LIBDIR", ""), "/usr/lib/%s" % sysconfig.get_config_vars().get("MULTIARCH", ""), "/usr/local/lib", "/opt/local/lib", "/usr/lib", "/usr/lib64", ] libpython = None cc = new_compiler() for name in libpython_names: libpython = cc.find_library_file(dirs, name) if libpython is not None: break return libpython or "" def generate_config_files(SWIG_EXECUTABLE, CXX_FLAGS): "Generate and install configuration files" # Get variables INSTALL_PREFIX = get_installation_prefix() PYTHON_LIBRARY = os.environ.get("PYTHON_LIBRARY", find_python_library()) MAJOR, MINOR, MICRO = VERSION.split(".") # Generate UFCConfig.cmake write_config_file(os.path.join("cmake", "templates", "UFCConfig.cmake.in"), os.path.join("cmake", "templates", "UFCConfig.cmake"), variables=dict(INSTALL_PREFIX=INSTALL_PREFIX, CXX_FLAGS=CXX_FLAGS.strip(), 
PYTHON_INCLUDE_DIR=sysconfig.get_python_inc(), PYTHON_LIBRARY=PYTHON_LIBRARY, PYTHON_EXECUTABLE=sys.executable, SWIG_EXECUTABLE=SWIG_EXECUTABLE, FULLVERSION=VERSION)) # Generate UFCConfigVersion.cmake write_config_file(os.path.join("cmake", "templates", \ "UFCConfigVersion.cmake.in"), os.path.join("cmake", "templates", \ "UFCConfigVersion.cmake"), variables=dict(FULLVERSION=VERSION, MAJOR=MAJOR, MINOR=MINOR, MICRO=MICRO)) # Generate UseUFC.cmake write_config_file(os.path.join("cmake", "templates", "UseUFC.cmake.in"), os.path.join("cmake", "templates", "UseUFC.cmake")) # FIXME: Generation of pkgconfig file may no longer be needed, so # FIXME: we may consider removing this. # Generate ufc-1.pc write_config_file(os.path.join("cmake", "templates", "ufc-1.pc.in"), os.path.join("cmake", "templates", "ufc-1.pc"), variables=dict(FULLVERSION=VERSION, INSTALL_PREFIX=INSTALL_PREFIX, CXX_FLAGS=CXX_FLAGS)) def has_cxx_flag(cc, flag): "Return True if compiler supports given flag" tmpdir = tempfile.mkdtemp(prefix="ffc-build-") devnull = oldstderr = None try: try: fname = os.path.join(tmpdir, "flagname.cpp") f = open(fname, "w") f.write("int main() { return 0;}") f.close() # Redirect stderr to /dev/null to hide any error messages # from the compiler. 
devnull = open(os.devnull, 'w') oldstderr = os.dup(sys.stderr.fileno()) os.dup2(devnull.fileno(), sys.stderr.fileno()) cc.compile([fname], output_dir=tmpdir, extra_preargs=[flag]) except: return False return True finally: if oldstderr is not None: os.dup2(oldstderr, sys.stderr.fileno()) if devnull is not None: devnull.close() shutil.rmtree(tmpdir) def run_install(): "Run installation" # Create batch files for Windows if necessary scripts = SCRIPTS if platform.system() == "Windows" or "bdist_wininst" in sys.argv: scripts = create_windows_batch_files(scripts) # Subclass extension building command to ensure that distutils to # finds the correct SWIG executable SWIG_EXECUTABLE = get_swig_executable() class my_build_ext(build_ext.build_ext): def find_swig(self): return SWIG_EXECUTABLE # Subclass the build command to ensure that build_ext produces # ufc.py before build_py tries to copy it. class my_build(build): def run(self): self.run_command('build_ext') build.run(self) # Check that compiler supports C++11 features cc = new_compiler() CXX = os.environ.get("CXX") if CXX: cc.set_executables(compiler_so=CXX, compiler=CXX, compiler_cxx=CXX) CXX_FLAGS = os.environ.get("CXXFLAGS", "") if has_cxx_flag(cc, "-std=c++11"): CXX_FLAGS += " -std=c++11" elif has_cxx_flag(cc, "-std=c++0x"): CXX_FLAGS += " -std=c++0x" # Generate config files generate_config_files(SWIG_EXECUTABLE, CXX_FLAGS) # Setup extension module for FFC time elements numpy_include_dir = numpy.get_include() ext_module_time = Extension("ffc_time_ext.time_elements_ext", sources=["ffc_time_ext/time_elements_interface.cpp", "ffc_time_ext/time_elements.cpp", "ffc_time_ext/LobattoQuadrature.cpp", "ffc_time_ext/RadauQuadrature.cpp", "ffc_time_ext/Legendre.cpp"], include_dirs = [numpy_include_dir]) # Setup extension module for UFC swig_options = ["-c++", "-shadow", "-modern", "-modernargs", "-fastdispatch", "-fvirtual", "-nosafecstrings", "-noproxydel", "-fastproxy", "-fastinit", "-fastunpack", "-fastquery", "-nobuildnone"] 
if sys.version_info[0] > 2: swig_options.insert(0, "-py3") ext_module_ufc = Extension("ufc._ufc", sources=[os.path.join("ufc", "ufc.i")], depends=[os.path.join("ufc", "ufc.h"), os.path.join("ufc", "ufc_geometry.h")], swig_opts=swig_options, extra_compile_args=CXX_FLAGS.split(), include_dirs=[os.path.join("ufc")]) # Call distutils to perform installation setup(name = "FFC", description = "The FEniCS Form Compiler", version = VERSION, author = AUTHORS, classifiers = [_f for _f in CLASSIFIERS.split('\n') if _f], license = "LGPL version 3 or later", author_email = "fenics@fenicsproject.org", maintainer_email = "fenics@fenicsproject.org", url = "http://fenicsproject.org/", platforms = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"], packages = ["ffc", "ffc.quadrature", "ffc.tensor", "ffc.uflacsrepr", "ffc.errorcontrol", "ffc.backends", "ffc.backends.dolfin", "ffc.backends.ufc", "ufc"], package_dir = {"ffc": "ffc", "ufc": "ufc"}, scripts = scripts, include_dirs = [numpy.get_include()], ext_modules = [ext_module_time, ext_module_ufc], cmdclass = {"build": my_build, "build_ext": my_build_ext}, data_files = [(os.path.join("share", "man", "man1"), [os.path.join("doc", "man", "man1", "ffc.1.gz")]), (os.path.join("include"), [os.path.join("ufc", "ufc.h"), os.path.join("ufc", "ufc_geometry.h")]), (os.path.join("share", "ufc"), [os.path.join("cmake", "templates", \ "UFCConfig.cmake"), os.path.join("cmake", "templates", \ "UFCConfigVersion.cmake"), os.path.join("cmake", "templates", \ "UseUFC.cmake")]), (os.path.join("lib", "pkgconfig"), [os.path.join("cmake", "templates", "ufc-1.pc")]), (os.path.join("include", "swig"), [os.path.join("ufc", "ufc.i"), os.path.join("ufc", "ufc_shared_ptr_classes.i")])]) if __name__ == "__main__": run_install() 
ffc-1.6.0/test/000077500000000000000000000000001255571034100132365ustar00rootroot00000000000000ffc-1.6.0/test/evaluate_basis/000077500000000000000000000000001255571034100162255ustar00rootroot00000000000000ffc-1.6.0/test/evaluate_basis/cppcode.py000066400000000000000000000045311255571034100202170ustar00rootroot00000000000000"This module provides simple C++ code for verification of UFC code." # Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
# # First added: 2010-01-18 # Last changed: 2010-01-18 # Common code for all integral types evaluate_basis_code = """\ #include #include #include "test.h" int main() { // Create element %(element)s element; // Size of dof_values // FIXME: This will not work for TensorElements int N = element.value_dimension(0); // Create values double* dof_values = new double[N]; for (int i = 0; i < N; i++) dof_values[i] = 0.0; // Create cell and fill with some arbitrary data double cell_coordinates[8][3] = {{0.90, 0.34, 0.45}, {0.56, 0.76, 0.83}, {0.98, 0.78, 0.19}, {0.12, 0.56, 0.66}, {0.96, 0.78, 0.63}, {0.11, 0.35, 0.49}, {0.51, 0.88, 0.65}, {0.98, 0.45, 0.01}}; ufc::cell cell; cell.coordinates = new double * [8]; for (int i = 0; i < 8; i++) { cell.coordinates[i] = new double[3]; for (int j = 0; j < 3; j++) cell.coordinates[i][j] = cell_coordinates[i][j]; } // Random coordinates where we want to evaluate the basis functions double coordinates[3] = {0.32, 0.51, 0.05}; // Loop element space dimension and call evaluate_basis. for (unsigned int i = 0; i < element.space_dimension(); i++) { element.evaluate_basis(i, dof_values, coordinates, cell); // Print values for (int j = 0; j < N; j++) std::cout << dof_values[j] << " "; } std::cout << std::endl; return 0; } """ ffc-1.6.0/test/evaluate_basis/test.py000066400000000000000000000227761255571034100175740ustar00rootroot00000000000000# Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. 
# # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2010-01-18 # Last changed: 2010-01-18 from __funture__ import print_function from cppcode import evaluate_basis_code from ufl import FiniteElement, MixedElement from instant.output import get_status_output import sys, os, pickle, numpy, shutil # Elements, supported by FFC and FIAT, and their supported shape and orders single_elements = [ {"family": "Lagrange",\ "shapes": ["interval", "triangle", "tetrahedron"],\ "orders": [1, 2, 3, 4]},\ {"family": "Discontinuous Lagrange",\ "shapes": ["interval", "triangle", "tetrahedron"],\ "orders": [0, 1, 2, 3, 4]},\ {"family": "Crouzeix-Raviart",\ "shapes": ["triangle", "tetrahedron"],\ "orders": [1]},\ {"family": "Raviart-Thomas",\ "shapes": ["triangle", "tetrahedron"],\ "orders": [1, 2, 3]},\ {"family": "Discontinuous Raviart-Thomas",\ "shapes": ["triangle", "tetrahedron"],\ "orders": [1, 2, 3]},\ {"family": "Brezzi-Douglas-Marini",\ "shapes": ["triangle", "tetrahedron"],\ "orders": [1, 2, 3]},\ {"family": "Brezzi-Douglas-Fortin-Marini",\ "shapes": ["triangle"],\ "orders": [2]},\ {"family": "Nedelec 1st kind H(curl)",\ "shapes": ["triangle", "tetrahedron"],\ "orders": [1, 2, 3]},\ {"family": "Nedelec 2nd kind H(curl)",\ "shapes": ["triangle", "tetrahedron"],\ "orders": [1, 2, 3]}] # Create some mixed elements dg0_tri = FiniteElement("DG", "triangle", 0) dg1_tri = FiniteElement("DG", "triangle", 1) cg1_tri = FiniteElement("CG", "triangle", 1) cr1_tri = FiniteElement("CR", "triangle", 1) rt1_tri = FiniteElement("RT", "triangle", 1) drt2_tri = FiniteElement("DRT", "triangle", 2) bdm1_tri = FiniteElement("BDM", "triangle", 1) ned1_tri = FiniteElement("N1curl", "triangle", 1) dg0_tet = FiniteElement("DG", "tetrahedron", 0) dg1_tet = FiniteElement("DG", "tetrahedron", 1) cg1_tet = FiniteElement("CG", "tetrahedron", 1) cr1_tet = FiniteElement("CR", "tetrahedron", 1) rt1_tet = FiniteElement("RT", 
"tetrahedron", 1) drt2_tet = FiniteElement("DRT", "tetrahedron", 2) bdm1_tet = FiniteElement("BDM", "tetrahedron", 1) ned1_tet = FiniteElement("N1curl", "tetrahedron", 1) mixed_elements = [MixedElement([dg0_tri]*4), MixedElement([cg1_tri]*3), MixedElement([bdm1_tri]*2),\ MixedElement([dg1_tri, cg1_tri, cr1_tri, rt1_tri, bdm1_tri, ned1_tri]),\ MixedElement([MixedElement([rt1_tri, cr1_tri]), cg1_tri, ned1_tri]),\ MixedElement([ned1_tri, dg1_tri, MixedElement([rt1_tri, cr1_tri])]),\ MixedElement([drt2_tri, cg1_tri]),\ MixedElement([dg0_tet]*4), MixedElement([cg1_tet]*3), MixedElement([bdm1_tet]*2),\ MixedElement([dg1_tet, cg1_tet, cr1_tet, rt1_tet, bdm1_tet, ned1_tet]),\ MixedElement([MixedElement([rt1_tet, cr1_tet]), cg1_tet, ned1_tet]),\ MixedElement([ned1_tet, dg1_tet, MixedElement([rt1_tet, cr1_tet])]),\ MixedElement([drt2_tet, cg1_tet])] ffc_failed = [] gcc_failed = [] run_failed = [] def check_results(values, reference): "Check results and print summary." missing_refs = [] diffs = [] num_ok = 0 print("") for element, vals in list(values.items()): print("\nResults for %s:" % element) if vals is None: print("Error") continue # Get reference values if not element in reference: missing_refs.append(element) print("Missing reference") continue refs = reference[element] tol = 1e-12 e = max(abs(vals - refs)) if e < tol: num_ok += 1 print("OK: (diff = %g)" % e) else: print("*** (diff = %g)" % e) diffs.append(element) if ffc_failed == gcc_failed == run_failed == missing_refs == diffs: print("\nAll %d elements verified OK" % len(reference)) return 0 if len(ffc_failed) > 0: print("\n*** FFC compilation failed for the following elements:\n" + "\n".join(ffc_failed)) if len(gcc_failed) > 0: print("\n*** g++ compilation failed for the following elements:\n" + "\n".join(gcc_failed)) if len(run_failed) > 0: print("\n*** Evaluation failed (seg. fault?) 
for the following elements:\n" + "\n".join(run_failed)) if len(missing_refs) > 0: print("\n*** No reference values were found for the following elements:\n" + "\n".join(missing_refs)) if len(diffs) > 0: print("\n*** Difference in values were found for the following elements:\n" + "\n".join(diffs)) return 1 def compile_element(ufl_element): "Create UFL form file with a single element in it and compile it with FFC" f = open("test.ufl", "w") if isinstance(ufl_element, (FiniteElement, MixedElement)): f.write("element = " + repr(ufl_element)) f.close() error, out = get_status_output("ffc test.ufl") if error: ffc_failed.append(repr(ufl_element)) return error def get_element_name(ufl_element): "Extract relevant element name from header file." f = open("test.h") lines = f.readlines() f.close() signature = repr(ufl_element) name = None for e, l in enumerate(lines): if "class" in l and "finite_element" in l: name = l if signature in l: break if name is None: raise RuntimeError("No finite element class found") return name.split()[1][:-1] def compute_values(ufl_element): "Compute values of basis functions for given element." 
# Get relevant element name element_name = get_element_name(ufl_element) # Create g++ code options = {"element": element_name} code = evaluate_basis_code % options f = open("evaluate_basis.cpp", "w") f.write(code) f.close() # Get UFC flags ufc_cflags = get_status_output("pkg-config --cflags ufc-1")[1].strip() # Compile g++ code c = "g++ %s -Wall -Werror -o evaluate_basis evaluate_basis.cpp" % ufc_cflags error, output = get_status_output(c) if error: gcc_failed.append(repr(ufl_element)) return None # Run compiled code and get values error, output = get_status_output(".%sevaluate_basis" % os.path.sep) if error: run_failed.append(repr(ufl_element)) return None values = [float(value) for value in output.split(" ") if len(value) > 0] return numpy.array(values) def print_refs(): if os.path.isfile("reference.pickle"): reference = pickle.load(open("reference.pickle", "r")) for elem, vals in list(reference.items()): print() print(elem) print(vals) else: raise RuntimeError("No references to print") def main(args): "Call evaluate basis for a range of different elements." 
if "refs" in args: print_refs() return 0 # Change to temporary folder and copy form files if not os.path.isdir("tmp"): os.mkdir("tmp") os.chdir("tmp") values = {} # Evaluate basis for single elements print("\nComputing evaluate_basis for single elements") for element in single_elements: for shape in element["shapes"]: for order in element["orders"]: ufl_element = FiniteElement(element["family"], shape, order) print("Compiling element: ", str(ufl_element)) error = compile_element(ufl_element) if error: continue print("Computing values") values[repr(ufl_element)] = compute_values(ufl_element) # Evaluate basis for mixed elements print("\nComputing evaluate_basis for mixed elements") for ufl_element in mixed_elements: print("Compiling element: ", str(ufl_element)) error = compile_element(ufl_element) if error: continue print("Computing values") values[repr(ufl_element)] = compute_values(ufl_element) # Load or update reference values os.chdir(os.pardir) if os.path.isfile("reference.pickle"): reference = pickle.load(open("reference.pickle", "r")) else: print("Unable to find reference values, storing current values.") pickle.dump(values, open("reference.pickle", "w")) return 0 # Check results error = check_results(values, reference) if not error: # Remove temporary directory shutil.rmtree("tmp") return error if __name__ == "__main__": sys.exit(main(sys.argv[1:])) ffc-1.6.0/test/evaluate_basis_derivatives/000077500000000000000000000000001255571034100206325ustar00rootroot00000000000000ffc-1.6.0/test/evaluate_basis_derivatives/cppcode.py000066400000000000000000000046331255571034100226270ustar00rootroot00000000000000"This module provides simple C++ code for verification of UFC code." # Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. 
# # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2010-01-18 # Last changed: 2010-01-18 # Common code for all integral types evaluate_basis_derivatives_code = """\ #include #include #include "test.h" int main() { // Create element %(element)s element; // Size of dof_values // FIXME: This will not work for TensorElements int N = element.value_dimension(0)*%(num_derivatives)d; // Create values double* dof_values = new double[N]; for (int i = 0; i < N; i++) dof_values[i] = 0.0; // Create cell and fill with some arbitrary data double cell_coordinates[8][3] = {{0.90, 0.34, 0.45}, {0.56, 0.76, 0.83}, {0.98, 0.78, 0.19}, {0.12, 0.56, 0.66}, {0.96, 0.78, 0.63}, {0.11, 0.35, 0.49}, {0.51, 0.88, 0.65}, {0.98, 0.45, 0.01}}; ufc::cell cell; cell.coordinates = new double * [8]; for (int i = 0; i < 8; i++) { cell.coordinates[i] = new double[3]; for (int j = 0; j < 3; j++) cell.coordinates[i][j] = cell_coordinates[i][j]; } // Random coordinates where we want to evaluate the basis functions double coordinates[3] = {0.32, 0.51, 0.05}; // Loop element space dimension and call evaluate_basis. 
for (unsigned int i = 0; i < element.space_dimension(); i++) { element.evaluate_basis_derivatives(i, %(derivative_order)d, dof_values, coordinates, cell); // Print values for (int j = 0; j < N; j++) std::cout << dof_values[j] << " "; } std::cout << std::endl; return 0; } """ ffc-1.6.0/test/evaluate_basis_derivatives/test.py000066400000000000000000000247021255571034100221700ustar00rootroot00000000000000# Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
# # First added: 2010-01-18 # Last changed: 2010-01-18 from __future__ import print_function from cppcode import evaluate_basis_derivatives_code from ufl import FiniteElement, MixedElement from instant.output import get_status_output import sys, os, pickle, numpy, shutil # Elements, supported by FFC and FIAT, and their supported shape and orders single_elements = [ {"family": "Lagrange",\ "shapes": ["interval", "triangle", "tetrahedron"],\ "orders": [1, 2, 3, 4]},\ {"family": "Discontinuous Lagrange",\ "shapes": ["interval", "triangle", "tetrahedron"],\ "orders": [0, 1, 2, 3, 4]},\ {"family": "Crouzeix-Raviart",\ "shapes": ["triangle", "tetrahedron"],\ "orders": [1]},\ {"family": "Raviart-Thomas",\ "shapes": ["triangle", "tetrahedron"],\ "orders": [1, 2, 3]},\ {"family": "Discontinuous Raviart-Thomas",\ "shapes": ["triangle", "tetrahedron"],\ "orders": [1, 2, 3]},\ {"family": "Brezzi-Douglas-Marini",\ "shapes": ["triangle", "tetrahedron"],\ "orders": [1, 2, 3]},\ {"family": "Brezzi-Douglas-Fortin-Marini",\ "shapes": ["triangle"],\ "orders": [2]},\ {"family": "Nedelec 1st kind H(curl)",\ "shapes": ["triangle", "tetrahedron"],\ "orders": [1, 2, 3]},\ {"family": "Nedelec 2nd kind H(curl)",\ "shapes": ["triangle", "tetrahedron"],\ "orders": [1, 2, 3]}] # Create some mixed elements dg0_tri = FiniteElement("DG", "triangle", 0) dg1_tri = FiniteElement("DG", "triangle", 1) cg1_tri = FiniteElement("CG", "triangle", 1) cr1_tri = FiniteElement("CR", "triangle", 1) rt1_tri = FiniteElement("RT", "triangle", 1) drt2_tri = FiniteElement("DRT", "triangle", 2) bdm1_tri = FiniteElement("BDM", "triangle", 1) ned1_tri = FiniteElement("N1curl", "triangle", 1) dg0_tet = FiniteElement("DG", "tetrahedron", 0) dg1_tet = FiniteElement("DG", "tetrahedron", 1) cg1_tet = FiniteElement("CG", "tetrahedron", 1) cr1_tet = FiniteElement("CR", "tetrahedron", 1) rt1_tet = FiniteElement("RT", "tetrahedron", 1) drt2_tet = FiniteElement("DRT", "tetrahedron", 2) bdm1_tet = FiniteElement("BDM", 
"tetrahedron", 1) ned1_tet = FiniteElement("N1curl", "tetrahedron", 1) mixed_elements = [MixedElement([dg0_tri]*4), MixedElement([cg1_tri]*3), MixedElement([bdm1_tri]*2),\ MixedElement([dg1_tri, cg1_tri, cr1_tri, rt1_tri, bdm1_tri, ned1_tri]),\ MixedElement([MixedElement([rt1_tri, cr1_tri]), cg1_tri, ned1_tri]),\ MixedElement([ned1_tri, dg1_tri, MixedElement([rt1_tri, cr1_tri])]),\ MixedElement([drt2_tri, cg1_tri]),\ MixedElement([dg0_tet]*4), MixedElement([cg1_tet]*3), MixedElement([bdm1_tet]*2),\ MixedElement([dg1_tet, cg1_tet, cr1_tet, rt1_tet, bdm1_tet, ned1_tet]),\ MixedElement([MixedElement([rt1_tet, cr1_tet]), cg1_tet, ned1_tet]),\ MixedElement([ned1_tet, dg1_tet, MixedElement([rt1_tet, cr1_tet])]),\ MixedElement([drt2_tet, cg1_tet])] ffc_failed = [] gcc_failed = [] run_failed = [] def check_results(values, reference): "Check results and print summary." missing_refs = [] diffs = [] num_ok = 0 print("") for element, deriv_orders in list(values.items()): print("\nResults for %s:" % element) for deriv_order, vals in list(deriv_orders.items()): if vals is None: print("Error") continue # Get reference values if not element in reference or not deriv_order in reference[element]: missing_refs.append(element + "order %d" % deriv_order) print("Missing reference") continue refs = reference[element][deriv_order] tol = 1e-12 e = max(abs(vals - refs)) if e < tol: num_ok += 1 print("Derivative order: %d, OK: (diff = %g)" % (deriv_order, e)) else: print("*** (Derivative order: %d, diff = %g)" % (deriv_order, e)) diffs.append(element + "order %d" % deriv_order) if ffc_failed == gcc_failed == run_failed == missing_refs == diffs: print("\nAll %d elements verified OK" % len(reference)) return 0 if len(ffc_failed) > 0: print("\n*** FFC compilation failed for the following elements:\n" + "\n".join(ffc_failed)) if len(gcc_failed) > 0: print("\n*** g++ compilation failed for the following elements:\n" + "\n".join(gcc_failed)) if len(run_failed) > 0: print("\n*** Evaluation failed 
(seg. fault?) for the following elements:\n" + "\n".join(run_failed)) if len(missing_refs) > 0: print("\n*** No reference values were found for the following elements:\n" + "\n".join(missing_refs)) if len(diffs) > 0: print("\n*** Difference in values were found for the following elements:\n" + "\n".join(diffs)) return 1 def compile_element(ufl_element): "Create UFL form file with a single element in it and compile it with FFC" f = open("test.ufl", "w") if isinstance(ufl_element, (FiniteElement, MixedElement)): f.write("element = " + repr(ufl_element)) f.close() error, out = get_status_output("ffc test.ufl") if error: ffc_failed.append(repr(ufl_element)) return error def get_element_name(ufl_element): "Extract relevant element name from header file." f = open("test.h") lines = f.readlines() f.close() signature = repr(ufl_element) name = None for e, l in enumerate(lines): if "class" in l and "finite_element" in l: name = l if signature in l: break if name is None: raise RuntimeError("No finite element class found") return name.split()[1][:-1] def compute_values(ufl_element, deriv_order): "Compute values of basis functions for given element." 
# Get relevant element name element_name = get_element_name(ufl_element) # Create g++ code domain, = ufl_element.domains() # Assuming single domain num_derivs = domain.topological_dimension()**deriv_order options = {"element": element_name, "derivative_order":deriv_order, "num_derivatives":num_derivs} code = evaluate_basis_derivatives_code % options f = open("evaluate_basis_derivatives.cpp", "w") f.write(code) f.close() # Get UFC flags ufc_cflags = get_status_output("pkg-config --cflags ufc-1")[1].strip() # Compile g++ code c = "g++ %s -Wall -Werror -o evaluate_basis_derivatives evaluate_basis_derivatives.cpp" % ufc_cflags error, output = get_status_output(c) if error: gcc_failed.append(repr(ufl_element)) return None # Run compiled code and get values error, output = get_status_output(".%sevaluate_basis_derivatives" % os.path.sep) if error: run_failed.append(repr(ufl_element)) return None values = [float(value) for value in output.split(" ") if len(value) > 0] return numpy.array(values) def print_refs(): if os.path.isfile("reference.pickle"): reference = pickle.load(open("reference.pickle", "r")) for elem, derivs in list(reference.items()): for deriv_order, vals in list(derivs.items()): print() print(elem) print(deriv_order) print(vals) else: raise RuntimeError("No references to print") def main(args): "Call evaluate basis derivatives for a range of different elements." 
if "refs" in args: print_refs() return 0 # Change to temporary folder and copy form files if not os.path.isdir("tmp"): os.mkdir("tmp") os.chdir("tmp") values = {} # Evaluate basis for single elements print("\nComputing evaluate_basis_derivatives for single elements") for element in single_elements: for shape in element["shapes"]: for order in element["orders"]: ufl_element = FiniteElement(element["family"], shape, order) print("Compiling element: ", str(ufl_element)) error = compile_element(ufl_element) if error: continue print("Computing values") values[repr(ufl_element)] = {} for deriv_order in range(1,4): values[repr(ufl_element)][deriv_order] = compute_values(ufl_element, deriv_order) # Evaluate basis for single elements print("\nComputing evaluate_basis_derivatives for mixed elements") for ufl_element in mixed_elements: print("Compiling element: ", str(ufl_element)) error = compile_element(ufl_element) if error: continue print("Computing values") values[repr(ufl_element)] = {} for deriv_order in range(1,4): values[repr(ufl_element)][deriv_order] = compute_values(ufl_element, deriv_order) # Load or update reference values os.chdir(os.pardir) if os.path.isfile("reference.pickle"): reference = pickle.load(open("reference.pickle", "r")) else: print("Unable to find reference values, storing current values.") pickle.dump(values, open("reference.pickle", "w")) return 0 # Check results error = check_results(values, reference) if not error: # Remove temporary directory shutil.rmtree("tmp") return error if __name__ == "__main__": sys.exit(main(sys.argv[1:])) ffc-1.6.0/test/regression/000077500000000000000000000000001255571034100154165ustar00rootroot00000000000000ffc-1.6.0/test/regression/README.rst000066400000000000000000000036321255571034100171110ustar00rootroot00000000000000How to run regression tests =========================== To run regression tests with default parameters, simply run: cd /tests/regression/ python test.py Look at test.py for more options. 
How to update references ======================== To update the references for the FFC regression tests, first commit your changes, then run the regression test (to generate the new references) and finally run the script upload: cd /tests/regression/ python test.py ./scripts/upload Note: You may be asked for your *Bitbucket* username and password when uploading the reference data, if use of ssh keys fails. Note: The upload script will push the new references to the ffc-reference-data repository. This is harmless even if these references are not needed later. Note: The upload script will update the file ffc-regression-data-id and commit this change to the currently active branch, remember to include this commit when merging or pushing your changes elsewhere. Note: You can cherry-pick the commit that updated ffc-regression-data-id into another branch to use the same set of references there. Note: If you ever get merge conflicts in the ffc-regression-data-id, always pick one version of the file. Most likely you'll need to update the references again. How to run regression tests against a different set of regression data ====================================================================== To run regression tests and compare to a different set of regression data, perhaps to see what has changed in generated code since a certain version, check out the ffc-regression-data-id file you want and run tests as usual cd /tests/regression/ git checkout ffc-regression-data-id python test.py The test.py script will run scripts/download which will check out the regression data with the commit id from ffc-regression-data-id in ffc-regression-data/. 
ffc-1.6.0/test/regression/elements.py000066400000000000000000000032231255571034100176040ustar00rootroot00000000000000interval_2D = "Cell('interval', geometric_dimension=2)" interval_3D = "Cell('interval', geometric_dimension=3)" triangle_3D = "Cell('triangle', geometric_dimension=3)" elements = ["FiniteElement('N1curl', triangle, 2)", "MixedElement([FiniteElement('Lagrange', triangle, 3), \ VectorElement('Lagrange', triangle, 3)['facet']])", "VectorElement('R', triangle, 0, 3)", "VectorElement('DG', %s, 1)" % interval_2D, "VectorElement('DG', %s, 1)" % interval_3D, "VectorElement('DG', %s, 1)" % triangle_3D, "MixedElement([VectorElement('CG', %s, 2), \ FiniteElement('CG', %s, 1)])" % (interval_2D, interval_2D), "MixedElement([VectorElement('CG', %s, 2), \ FiniteElement('CG', %s, 1)])" % (interval_3D, interval_3D), "MixedElement([VectorElement('CG', %s, 2), \ FiniteElement('CG', %s, 1)])" % (triangle_3D, triangle_3D), "MixedElement([FiniteElement('RT', %s, 2), \ FiniteElement('BDM', %s, 1), \ FiniteElement('N1curl', %s, 1), \ FiniteElement('DG', %s, 1)])" % (triangle_3D, triangle_3D, triangle_3D, triangle_3D) ] ffc-1.6.0/test/regression/ffc-reference-data-id000066400000000000000000000000511255571034100213300ustar00rootroot00000000000000642d3341c412d357b023a4de204a38c352ad82f5 ffc-1.6.0/test/regression/printer.h000066400000000000000000000064321255571034100172570ustar00rootroot00000000000000#ifndef PRINTER_H_INCLUDED #define PRINTER_H_INCLUDED #include #include #include #include #include #include class Printer { protected: // Precision in output of floats const std::size_t precision; const double epsilon; // Output stream std::ostream & os; // Indentation level int level; public: Printer(std::ostream & os): precision(16), epsilon(1e-16), os(os), level(0) {} /// Indent to current level void indent() { for (int i=0; i= 0) s << "_" << i; if (j >= 0) s << "_" << j; return s.str(); } /// Format '"foo": ' properly void begin_entry(std::string name, int i=-1, int j=-1) { 
name = format_name(name, i, j); indent(); os << '"' << name << '"' << ": "; } /// Begin an unnamed block void begin() { os << "{" << std::endl; ++level; } /// Begin a named block entry void begin(std::string name, int i=-1, int j=-1) { begin_entry(name, i, j); begin(); } /// End current named or unnamed block void end() { --level; assert(level >= 0); indent(); if (level > 0) os << "}," << std::endl; else os << "}" << std::endl; } /// Format a value type properly template void print_value(T value); /// Set a named single value entry template void print_scalar(std::string name, T value, int i=-1, int j=-1) { begin_entry(name, i, j); print_value(value); os << ", " << std::endl; } /// Set a named array valued entry template void print_array(std::string name, int n, T * values, int i=-1, int j=-1) { begin_entry(name, i, j); os << "["; if (n > 0) print_value(values[0]); for (int k=1; k void print_vector(std::string name, typename std::vector values, int i=-1, int j=-1) { begin_entry(name, i, j); os << "["; typename std::vector::iterator k=values.begin(); if (k!=values.end()) { print_value(*k); ++k; } while (k!=values.end()) { os << ", "; print_value(*k); ++k; } os << "], " << std::endl; } }; /// Fallback formatting for any value type template void Printer::print_value(T value) { os << value; } /// Use precision for floats template<> void Printer::print_value(double value) { os.setf(std::ios::scientific, std::ios::floatfield); os.precision(precision); if (std::abs(static_cast(value)) < epsilon) os << "0.0"; else os << value; } /// Use precision for floats template<> void Printer::print_value(float value) { print_value(static_cast(value)); } /// Wrap strings in quotes template<> void Printer::print_value(std::string value) { os << '"' << value << '"'; } /// Wrap strings in quotes template<> void Printer::print_value(const char * value) { os << '"' << value << '"'; } #endif 
ffc-1.6.0/test/regression/recdiff.py000077500000000000000000000142111255571034100173740ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- class DiffMarkerType: def __init__(self, name): self.name = name def __str__(self): return self.name def __repr__(self): return self.name DiffMissing = DiffMarkerType("") DiffEqual = DiffMarkerType("") _default_recdiff_tolerance = 1e-6 def recdiff_dict(data1, data2, tolerance=_default_recdiff_tolerance): keys1 = set(data1.keys()) keys2 = set(data2.keys()) keys = keys1.intersection(keys2) diff = {} for k in keys1-keys: diff[k] = (data1[k], DiffMissing) for k in keys2-keys: diff[k] = (DiffMissing, data2[k]) for k in keys: d1 = data1[k] d2 = data2[k] d = recdiff(d1, d2, tolerance) if d is not DiffEqual: diff[k] = d return diff or DiffEqual def recdiff(data1, data2, tolerance=_default_recdiff_tolerance): if isinstance(data1, (float,int)) and isinstance(data2, (float,int)): # This approach allows numbers formatted as ints and floats interchangably as long as the values are equal delta = abs(data1 - data2) avg = (abs(data1) + abs(data2)) / 2.0 if 0: # Using relative comparison, i.e. 
a tolerance of 1e-2 means one percent error is acceptable eps = tolerance * avg same = avg < 1e-14 or delta < eps else: # Using absolute comparison, this is what the old .out comparison does same = delta < tolerance return DiffEqual if same else (data1, data2) elif type(data1) != type(data2): return (data1, data2) elif isinstance(data1, dict): return recdiff_dict(data1, data2, tolerance) elif isinstance(data1, list): diff = [recdiff(d1, d2, tolerance) for (d1,d2) in zip(data1, data2)] return DiffEqual if all(d is DiffEqual for d in diff) else diff else: return DiffEqual if data1 == data2 else (data1, data2) def _print(line): print(line) def print_recdiff(diff, indent=0, printer=_print, prekey=""): if isinstance(diff, dict): for k in sorted(diff.keys()): key = str(k) if prekey: key = ".".join((prekey, key)) printer("%s%s: " % (" "*indent, key)) print_recdiff(diff[k], indent+1, printer, key) elif isinstance(diff, list): # Limiting this to lists of scalar values! for i, d in enumerate(diff): if isinstance(d, tuple): data1, data2 = d printer("%s%d: %s != %s" % (" "*indent, i, data1, data2)) elif isinstance(diff, tuple): assert len(diff) == 2 data1, data2 = diff data1 = str(data1) data2 = str(data2) if len(data1) + len(data2) + 2*indent + 4 > 70: printer("%s%s" % (" "*indent, data1)) printer("%s!=" % (" "*indent)) printer("%s%s" % (" "*indent, data2)) else: printer("%s%s != %s" % (" "*indent, data1, data2)) # ---------- Unittest code import unittest #from recdiff import recdiff, print_recdiff, DiffEqual, DiffMissing class RecDiffTestCase(unittest.TestCase): def assertEqual(self, a, b): if not (a == b): print(a) print(b) assert a == b def assertDiffEqual(self, diff): self.assertEqual(diff, DiffEqual) def test_recdiff_equal_items(self): self.assertDiffEqual(recdiff(1,1)) self.assertDiffEqual(recdiff(0,0)) self.assertDiffEqual(recdiff(0,1e-15)) self.assertDiffEqual(recdiff(1.1,1.1+1e-7,tolerance=1e-6)) self.assertDiffEqual(recdiff(1.1,1.1-1e-7,tolerance=1e-6)) 
self.assertDiffEqual(recdiff("foo", "foo")) def test_recdiff_not_equal_items(self): self.assertEqual(recdiff(1,2), (1,2)) self.assertEqual(recdiff(0,0.0001), (0,0.0001)) self.assertEqual(recdiff(0,1e-13), (0,1e-13)) self.assertEqual(recdiff(1.1,1.2+1e-7,tolerance=1e-6), (1.1,1.2+1e-7)) self.assertEqual(recdiff(1.1,1.2-1e-7,tolerance=1e-6), (1.1,1.2-1e-7)) self.assertEqual(recdiff("foo", "bar"), ("foo", "bar")) def test_recdiff_equal_list(self): self.assertDiffEqual(recdiff([1,2], [1,2])) def test_recdiff_not_equal_list(self): self.assertEqual(recdiff([1,2], [1,3]), [DiffEqual, (2,3)]) def test_recdiff_equal_dict(self): self.assertDiffEqual(recdiff({1:2}, {1:2})) def test_recdiff_not_equal_dict(self): self.assertEqual(recdiff({1:2,2:3}, {1:3,3:4}), {1:(2,3), 2:(3,DiffMissing), 3:(DiffMissing,4)}) def test_recdiff_equal_dict_hierarchy(self): self.assertDiffEqual(recdiff({1:{2:{3:4,5:6}}}, {1:{2:{3:4,5:6}}})) def test_recdiff_not_equal_dict_hierarchy(self): self.assertEqual(recdiff({1:{2:{3:4,5:6}}}, {1:{2:{3:4,5:7}}}), {1:{2:{5:(6,7)}}}) def test_example(self): form1 = { "num_coefficients": 2, "num_arguments": 2, "has_default_cell_integral": 1, "cell_integrals": { 0: { "tabulate_tensor_input1": ["data"] } }, } form2 = eval("""{ "num_coefficients": 2, "rank": 2, "has_default_cell_integral": 0, "cell_integrals": { 0: { "tabulate_tensor_input1": ["data2"] } }, }""") actual_diff = recdiff(form1, form2) if 0: print_recdiff(actual_diff) expected_diff = { #"num_coefficients": DiffEqual, "num_arguments": (2,DiffMissing), "rank": (DiffMissing,2), "has_default_cell_integral": (1,0), "cell_integrals": { 0: { "tabulate_tensor_input1": [("data", "data2")] } }, } self.assertEqual(actual_diff, expected_diff) def main(a, b, tolerance=_default_recdiff_tolerance): print("Running diff on files %s and %s" % (a, b)) a = eval(open(a).read()) b = eval(open(b).read()) d = recdiff(a, b, float(tolerance)) print_recdiff(d) if __name__ == "__main__": import sys args = sys.argv[1:] if not args: 
# Hack to be able to use this as a script, TODO: do something nicer print("No arguments, running tests.") unittest.main() else: main(*args) ffc-1.6.0/test/regression/scripts/000077500000000000000000000000001255571034100171055ustar00rootroot00000000000000ffc-1.6.0/test/regression/scripts/download000077500000000000000000000022731255571034100206460ustar00rootroot00000000000000#!/bin/bash # # Copyright (C) 2013 Anders Logg and Martin Sandve Alnaes # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Johannes Ring, 2013-04-23 # # First added: 2013-04-22 # Last changed: 2013-08-20 # # This script downloads the reference data for the FFC regression tests # and updates to the reference data version specified by the data id file. # Parameters source ./scripts/parameters # Get updated reference repository ./scripts/getreferencerepo if [ $? -ne 0 ]; then exit 1 fi # Checkout data referenced by id file ./scripts/getdata if [ $? -ne 0 ]; then exit 1 fi ffc-1.6.0/test/regression/scripts/getdata000077500000000000000000000022111255571034100204400ustar00rootroot00000000000000#!/bin/bash # # Copyright (C) 2013 Anders Logg and Martin Sandve Alnaes # # This file is part of FFC. 
# # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2013-04-22 # Last changed: 2013-08-21 # # This script checks out reference data by the given commit id, # or if none given using the commit id found in data id file. # Parameters source scripts/parameters # Take data id as optional argument or get from file DATA_ID=$1 && [ -z "$DATA_ID" ] && DATA_ID=`cat $DATA_ID_FILE` # Checkout data referenced by id (cd $DATA_DIR && git checkout -B auto $DATA_ID) exit $? ffc-1.6.0/test/regression/scripts/getreferencerepo000077500000000000000000000036121255571034100223610ustar00rootroot00000000000000#!/bin/bash # # Copyright (C) 2013 Anders Logg and Martin Sandve Alnaes # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
# # First added: 2013-04-22 # Last changed: 2013-08-21 # # This script overwrites the reference data with the current output # and stores the new reference data as part of the FFC reference data # repository. # Parameters source ./scripts/parameters # Get reference repository if [ ! -d "$DATA_DIR" ]; then echo "Cloning reference data repository" git clone $DATA_REPO_GIT if [ ! -d "$DATA_DIR" ]; then git clone $DATA_REPO_HTTPS fi else pushd $DATA_DIR echo "Found existing reference data repository, pulling new data" git checkout master if [ $? -ne 0 ]; then echo "Failed to checkout master, check state of reference data directory." exit 1 fi git fetch if [ $? -ne 0 ]; then echo "WARNING: Failed to fetch latest reference data from server." else git pull if [ $? -ne 0 ]; then echo "Failed to pull latest reference data from server, possibly a merge situation." exit 1 fi fi popd fi # Check that we had success with getting reference repository if [ ! -d "$DATA_DIR" ]; then echo "Failed to update reference data directory '$DATA_DIR'." exit 1 fi ffc-1.6.0/test/regression/scripts/parameters000077500000000000000000000003551255571034100212010ustar00rootroot00000000000000OUTPUT_DIR="output" DATA_REPO_GIT="git@bitbucket.org:fenics-project/ffc-reference-data.git" DATA_REPO_HTTPS="https://bitbucket.org/fenics-project/ffc-reference-data.git" DATA_DIR="ffc-reference-data" DATA_ID_FILE="ffc-reference-data-id" ffc-1.6.0/test/regression/scripts/upload000077500000000000000000000042221255571034100203170ustar00rootroot00000000000000#!/bin/bash # # Copyright (C) 2013 Anders Logg and Martin Sandve Alnaes # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2013-04-22 # Last changed: 2013-08-21 # # This script overwrites the reference data with the current output # and stores the new reference data as part of the FFC reference data # repository. The commit id of the stored reference data is commited # to a file in the main repo. # Parameters source ./scripts/parameters # Get updated reference repository ./scripts/getreferencerepo if [ $? -ne 0 ]; then exit 1 fi # Check that we have any data if [ ! -d "$OUTPUT_DIR" ]; then echo "Missing data directory '$OUTPUT_DIR'." exit 1 fi # Copy references echo "Copying new reference data to $DATA_DIR" rsync -r --exclude='README.rst' --exclude='*.bin' --exclude='*.cpp' $OUTPUT_DIR/ $DATA_DIR echo "" # Get current id for main repo (does not include dirty files, so not quite trustworthy!) REPO_ID=`git rev-list --max-count 1 HEAD` # Commit new data to reference repository pushd $DATA_DIR git add * git commit -m "Update reference data, current project head is ${REPO_ID}." | grep -v "create mode" if [ $? -ne 0 ]; then echo "Failed to commit reference data." exit 1 fi DATA_ID=`git rev-list --max-count 1 HEAD` popd # Commit reference data commit id to file in main repo echo $DATA_ID > $DATA_ID_FILE git commit $DATA_ID_FILE -m"Update reference data pointer to ${DATA_ID}." # Push references to server pushd $DATA_DIR git push if [ $? -ne 0 ]; then echo "WARNING: Failed to push new reference data to server." 
fi popd ffc-1.6.0/test/regression/test.py000077500000000000000000000404651255571034100167630ustar00rootroot00000000000000#!/usr/bin/env python """This script compiles and verifies the output for all form files found in the 'demo' directory. The verification is performed in two steps. First, the generated code is compared with stored references. Then, the output from all functions in the generated code is compared with stored reference values. This script can also be used for benchmarking tabulate_tensor for all form files found in the 'bench' directory. To run benchmarks, use the option --bench. """ # Copyright (C) 2010-2013 Anders Logg, Kristian B. Oelgaard and Marie E. Rognes # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Martin Alnaes, 2013-2015 # Modified by Johannes Ring, 2013 # Modified by Kristian B. Oelgaard, 2013 # Modified by Garth N. Wells, 2014 # FIXME: Need to add many more test cases. Quite a few DOLFIN forms # failed after the FFC tests passed. import os, sys, shutil, difflib, sysconfig from numpy import array, shape, abs, max, isnan from ffc.log import begin, end, info, info_red, info_green, info_blue from ufctest import generate_test_code from instant.output import get_status_output import time # Parameters debug = False # TODO: Can make this a cmdline argument, and start crashing programs in debugger automatically? 
output_tolerance = 1.e-6 demo_directory = "../../../../demo" bench_directory = "../../../../bench" # Global log file logfile = None # Extended quadrature tests (optimisations) ext_quad = [\ "-r quadrature -O -feliminate_zeros", "-r quadrature -O -fsimplify_expressions", "-r quadrature -O -fprecompute_ip_const", "-r quadrature -O -fprecompute_basis_const", "-r quadrature -O -fprecompute_ip_const -feliminate_zeros", "-r quadrature -O -fprecompute_basis_const -feliminate_zeros", ] # Extended uflacs tests (to be extended with optimisation parameters later) ext_uflacs = [\ "-r uflacs", ] _command_timings = [] def run_command(command): "Run command and collect errors in log file." # Debugging: #print "IN DIRECTORY:", os.path.abspath(os.curdir) #print "RUNNING COMMAND:", command t1 = time.time() (status, output) = get_status_output(command) t2 = time.time() global _command_timings _command_timings.append((command, t2-t1)) if status == 0: return True global logfile if logfile is None: logfile = open("../../error.log", "w") logfile.write(output + "\n") print(output) return False def log_error(message): "Log error message." global logfile if logfile is None: logfile = open("../../error.log", "w") logfile.write(message + "\n") def clean_output(output_directory): "Clean out old output directory" if os.path.isdir(output_directory): shutil.rmtree(output_directory) os.mkdir(output_directory) def generate_test_cases(bench, only_forms): "Generate form files for all test cases." 
begin("Generating test cases") # Copy form files if bench: form_directory = bench_directory else: form_directory = demo_directory # Make list of form files form_files = [f for f in os.listdir(form_directory) if f.endswith(".ufl")] if only_forms: form_files = [f for f in form_files if f in only_forms] form_files.sort() for f in form_files: shutil.copy(os.path.join(form_directory, f), ".") info_green("Found %d form files" % len(form_files)) # Generate form files for forms info("Generating form files for extra forms: Not implemented") # Generate form files for elements if not bench: from elements import elements info("Generating form files for extra elements (%d elements)" % len(elements)) for (i, element) in enumerate(elements): open("X_Element%d.ufl" % i, "w").write("element = %s" % element) end() def generate_code(args, only_forms): "Generate code for all test cases." # Get a list of all files form_files = [f for f in os.listdir(".") if f.endswith(".ufl")] if only_forms: form_files = [f for f in form_files if f in only_forms] form_files.sort() begin("Generating code (%d form files found)" % len(form_files)) # TODO: Parse additional options from .ufl file? I.e. grep for # some sort of tag like '#ffc: '. special = { "AdaptivePoisson.ufl": "-e", } # Iterate over all files for f in form_files: options = special.get(f, "") cmd = ("ffc %s %s -f precision=8 -fconvert_exceptions_to_warnings %s" % (options, " ".join(args), f)) # Generate code ok = run_command(cmd) # Check status if ok: info_green("%s OK" % f) else: info_red("%s failed" % f) end() def validate_code(reference_dir): "Validate generated code against references." 
# Get a list of all files header_files = [f for f in os.listdir(".") if f.endswith(".h")] header_files.sort() begin("Validating generated code (%d header files found)" % len(header_files)) # Iterate over all files for f in header_files: # Get generated code generated_code = open(f).read() # Get reference code reference_file = os.path.join(reference_dir, f) if os.path.isfile(reference_file): reference_code = open(reference_file).read() else: info_blue("Missing reference for %s" % reference_file) continue # Compare with reference if generated_code == reference_code: info_green("%s OK" % f) else: info_red("%s differs" % f) diff = "\n".join([line for line in difflib.unified_diff(reference_code.split("\n"), generated_code.split("\n"))]) s = ("Code differs for %s, diff follows (reference first, generated second)" % os.path.join(*reference_file.split(os.path.sep)[-3:])) log_error("\n" + s + "\n" + len(s)*"-") log_error(diff) end() def build_programs(bench, permissive): "Build test programs for all test cases." 
# Get a list of all files header_files = [f for f in os.listdir(".") if f.endswith(".h")] header_files.sort() begin("Building test programs (%d header files found)" % len(header_files)) # Get UFC flags ufc_cflags = get_status_output("pkg-config --cflags ufc-1")[1].strip() # Get Boost dir (code copied from ufc/src/utils/python/ufc_utils/build.py) # Set a default directory for the boost installation if sys.platform == "darwin": # Use Brew as default default = os.path.join(os.path.sep, "usr", "local") else: default = os.path.join(os.path.sep, "usr") # If BOOST_DIR is not set use default directory boost_inc_dir = "" boost_lib_dir = "" boost_math_tr1_lib = "boost_math_tr1" boost_dir = os.getenv("BOOST_DIR", default) boost_is_found = False for inc_dir in ["", "include"]: if os.path.isfile(os.path.join(boost_dir, inc_dir, "boost", "version.hpp")): boost_inc_dir = os.path.join(boost_dir, inc_dir) break libdir_multiarch = "lib/" + sysconfig.get_config_vars().get("MULTIARCH", "") for lib_dir in ["", "lib", libdir_multiarch, "lib64"]: for ext in [".so", "-mt.so", ".dylib", "-mt.dylib"]: _lib = os.path.join(boost_dir, lib_dir, "lib" + boost_math_tr1_lib + ext) if os.path.isfile(_lib): if "-mt" in _lib: boost_math_tr1_lib += "-mt" boost_lib_dir = os.path.join(boost_dir, lib_dir) break if boost_inc_dir != "" and boost_lib_dir != "": boost_is_found = True if not boost_is_found: raise OSError("""The Boost library was not found. If Boost is installed in a nonstandard location, set the environment variable BOOST_DIR. 
""") ufc_cflags += " -I%s -L%s" % (boost_inc_dir, boost_lib_dir) # Set compiler options compiler_options = "%s -Wall" % ufc_cflags if not permissive: compiler_options += " -Werror -pedantic" if bench: info("Benchmarking activated") # Takes too long to build with -O2 #compiler_options += " -O2" compiler_options += " -O3" #compiler_options += " -O3 -fno-math-errno -march=native" if debug: info("Debugging activated") compiler_options += " -g -O0" info("Compiler options: %s" % compiler_options) # Iterate over all files for f in header_files: # Generate test code filename = generate_test_code(f) # Compile test code prefix = f.split(".h")[0] command = "g++ %s -o %s.bin %s.cpp -l%s" % \ (compiler_options, prefix, prefix, boost_math_tr1_lib) ok = run_command(command) # Check status if ok: info_green("%s OK" % prefix) else: info_red("%s failed" % prefix) end() def run_programs(bench): "Run generated programs." # This matches argument parsing in the generated main files bench = 'b' if bench else '' # Get a list of all files test_programs = [f for f in os.listdir(".") if f.endswith(".bin")] test_programs.sort() begin("Running generated programs (%d programs found)" % len(test_programs)) # Iterate over all files for f in test_programs: # Compile test code prefix = f.split(".bin")[0] ok = run_command(".%s%s.bin %s" % (os.path.sep, prefix, bench)) # Check status if ok: info_green("%s OK" % f) else: info_red("%s failed" % f) end() def validate_programs(reference_dir): "Validate generated programs against references." 
# Get a list of all files output_files = sorted(f for f in os.listdir(".") if f.endswith(".json")) begin("Validating generated programs (%d .json program output files found)" % len(output_files)) # Iterate over all files for fj in output_files: # Get generated json output if os.path.exists(fj): generated_json_output = open(fj).read() if "nan" in generated_json_output: info_red("Found nan in generated json output, replacing with 999 to be able to parse as python dict.") generated_json_output = generated_json_output.replace("nan", "999") else: generated_json_output = "{}" # Get reference json output reference_json_file = os.path.join(reference_dir, fj) if os.path.isfile(reference_json_file): reference_json_output = open(reference_json_file).read() else: info_blue("Missing reference for %s" % reference_json_file) reference_json_output = "{}" # Compare json with reference using recursive diff algorithm # TODO: Write to different error file? from recdiff import recdiff, print_recdiff, DiffEqual # Assuming reference is well formed reference_json_output = eval(reference_json_output) try: generated_json_output = eval(generated_json_output) except Exception as e: info_red("Failed to evaluate json output for %s" % fj) log_error(str(e)) generated_json_output = None json_diff = (None if generated_json_output is None else recdiff(generated_json_output, reference_json_output, tolerance=output_tolerance)) json_ok = json_diff == DiffEqual # Check status if json_ok: info_green("%s OK" % fj) else: info_red("%s differs" % fj) log_error("Json output differs for %s, diff follows (generated first, reference second)" % os.path.join(*reference_json_file.split(os.path.sep)[-3:])) print_recdiff(json_diff, printer=log_error) end() def main(args): "Run all regression tests." 
# Check command-line arguments TODO: Use argparse generate_only = "--generate-only" in args fast = "--fast" in args bench = "--bench" in args use_quad = "--skip-quad" not in args use_ext_quad = "--ext-quad" in args use_ext_uflacs = "--ext-uflacs" in args permissive = "--permissive" in args tolerant = "--tolerant" in args print_timing = "--print-timing" in args skip_download = "--skip-download" in args ignore_code_diff = "--ignore-code-diff" in args flags = ( "--generate-only", "--fast", "--bench", "--skip-quad", "--ext-quad", "--ext-uflacs", "--permissive", "--tolerant", "--print-timing", "--skip-download", "--ignore-code-diff", ) args = [arg for arg in args if not arg in flags] # Extract .ufl names from args only_forms = set([arg for arg in args if arg.endswith(".ufl")]) args = [arg for arg in args if arg not in only_forms] # Download reference data if skip_download: info_blue("Skipping reference data download") else: failure, output = get_status_output("./scripts/download") print(output) if failure: info_red("Download reference data failed") else: info_green("Download reference data ok") if tolerant: global output_tolerance output_tolerance = 1e-3 # Clean out old output directory output_directory = "output" clean_output(output_directory) os.chdir(output_directory) # Adjust which test cases (combinations of compile arguments) to # run here test_cases = ["-r auto"] if use_quad and (not bench and not fast): test_cases += ["-r quadrature", "-r quadrature -O"] if use_ext_quad: test_cases += ext_quad if use_ext_uflacs: test_cases = ext_uflacs test_cases += ["-r quadrature"] #test_cases += ["-r quadrature -O"] for argument in test_cases: begin("Running regression tests with %s" % argument) # Clear and enter output sub-directory sub_directory = "_".join(argument.split(" ")).replace("-", "") clean_output(sub_directory) os.chdir(sub_directory) # Generate test cases generate_test_cases(bench, only_forms) # Generate code generate_code(args + [argument], only_forms) # 
Location of reference directories reference_directory = os.path.abspath("../../ffc-reference-data/") code_reference_dir = os.path.join(reference_directory, sub_directory) # Note: We use the r_auto references for all test cases. This # ensures that we continously test that the codes generated by # all different representations are equivalent. output_reference_dir = os.path.join(reference_directory, "r_auto") # Validate code by comparing to code generated with this set # of compiler parameters if not bench and (argument not in ext_quad) and not ignore_code_diff: validate_code(code_reference_dir) # Build and run programs and validate output to common # reference if fast or generate_only: info("Skipping program validation") elif bench: build_programs(bench, permissive) run_programs(bench) else: build_programs(bench, permissive) run_programs(bench) validate_programs(output_reference_dir) # Go back up os.chdir(os.path.pardir) end() # Print results if print_timing: timings = '\n'.join("%10.2e s %s" % (t, name) for (name, t) in _command_timings) info_green("Timing of all commands executed:") info(timings) if logfile is None: info_green("Regression tests OK") return 0 else: info_red("Regression tests failed") info("Error messages stored in error.log") return 1 if __name__ == "__main__": sys.exit(main(sys.argv[1:])) ffc-1.6.0/test/regression/ufctest.h000066400000000000000000000635621255571034100172600ustar00rootroot00000000000000// Copyright (C) 2010-2015 Anders Logg // // This file is part of FFC. // // FFC is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // FFC is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with FFC. If not, see . // // Modified by Martin Alnaes, 2013-2015 // // Functions for calling generated UFC functions with "random" (but // fixed) data and print the output to screen. Useful for running // regression tests. #include #include #include #include #include #include #include #include "printer.h" // How many derivatives to test const std::size_t max_derivative = 2; // Parameters for adaptive timing const std::size_t initial_num_reps = 10; const double minimum_timing = 1.0; // Function for timing double time() { clock_t __toc_time = std::clock(); return ((double) (__toc_time)) / CLOCKS_PER_SEC; } // Function for creating "random" vertex coordinates std::vector test_vertex_coordinates(int gdim) { // Generate some "random" coordinates std::vector vertex_coordinates; if (gdim == 1) { vertex_coordinates.resize(4); vertex_coordinates[0] = 0.903; vertex_coordinates[1] = 0.561; vertex_coordinates[2] = 0.987; vertex_coordinates[3] = 0.123; } else if (gdim == 2) { vertex_coordinates.resize(8); vertex_coordinates[0] = 0.903; vertex_coordinates[1] = 0.341; vertex_coordinates[2] = 0.561; vertex_coordinates[3] = 0.767; vertex_coordinates[4] = 0.987; vertex_coordinates[5] = 0.783; vertex_coordinates[6] = 0.123; vertex_coordinates[7] = 0.561; } else if (gdim == 3) { vertex_coordinates.resize(12); vertex_coordinates[0] = 0.903; vertex_coordinates[1] = 0.341; vertex_coordinates[2] = 0.457; vertex_coordinates[3] = 0.561; vertex_coordinates[4] = 0.767; vertex_coordinates[5] = 0.833; vertex_coordinates[6] = 0.987; vertex_coordinates[7] = 0.783; vertex_coordinates[8] = 0.191; vertex_coordinates[9] = 0.123; vertex_coordinates[10] = 0.561; vertex_coordinates[11] = 0.667; } return vertex_coordinates; } // Class for creating "random" ufc::cell objects class test_cell : public ufc::cell { public: test_cell(ufc::shape cell_shape, int 
gdim=0, int offset=0) { // Store cell shape this->cell_shape = cell_shape; // Store dimensions switch (cell_shape) { case ufc::interval: topological_dimension = 1; if (gdim == 0) geometric_dimension = 1; else geometric_dimension = gdim; break; case ufc::triangle: topological_dimension = 2; if (gdim == 0) geometric_dimension = 2; else geometric_dimension = gdim; break; case ufc::tetrahedron: topological_dimension = 3; if (gdim == 0) geometric_dimension = 3; else geometric_dimension = gdim; break; default: throw std::runtime_error("Unhandled cell shape."); } // Set orientation (random, but must be set) this->orientation = 1; // Generate some "random" entity indices entity_indices.resize(4); for (std::size_t i = 0; i < 4; i++) { entity_indices[i].resize(6); for (std::size_t j = 0; j < 6; j++) entity_indices[i][j] = i*j + offset; } } ~test_cell() {} }; // Class for creating a "random" ufc::function object class test_function : public ufc::function { public: test_function(std::size_t value_size) : value_size(value_size) {} void evaluate(double* values, const double* coordinates, const ufc::cell& c) const { for (std::size_t i = 0; i < value_size; i++) { values[i] = 1.0; for (std::size_t j = 0; j < c.geometric_dimension; j++) values[i] *= static_cast(i + 1)*coordinates[j]; } } private: std::size_t value_size; }; std::string format_name(std::string name, int i=-1, int j=-1) { std::stringstream s; s << name; if (i >= 0) s << "_" << i; if (j >= 0) s << "_" << j; return s.str(); } // Function for testing ufc::element objects void test_finite_element(ufc::finite_element& element, int id, Printer& printer) { printer.begin("finite_element", id); // Prepare arguments test_cell c(element.cell_shape(), element.geometric_dimension()); const std::vector vertex_coordinates = test_vertex_coordinates(element.geometric_dimension()); std::size_t value_size = 1; for (std::size_t i = 0; i < element.value_rank(); i++) value_size *= element.value_dimension(i); std::size_t derivative_size = 1; 
for (std::size_t i = 0; i < max_derivative; i++) derivative_size *= c.geometric_dimension; double* values = new double[element.space_dimension()*value_size*derivative_size]; for (std::size_t i = 0; i < element.space_dimension()*value_size*derivative_size; i++) { values[i] = 0.0; } double* dof_values = new double[element.space_dimension()]; for (std::size_t i = 0; i < element.space_dimension(); i++) dof_values[i] = 0.0; double* vertex_values = new double[(c.topological_dimension + 1)*value_size]; for (std::size_t i = 0; i < (c.topological_dimension + 1)*value_size; i++) vertex_values[i] = 0.0; double* coordinates = new double[c.geometric_dimension]; for (std::size_t i = 0; i < c.geometric_dimension; i++) coordinates[i] = 0.1*static_cast(i); test_function f(value_size); // signature //printer.print_scalar("signature", element.signature()); // cell_shape printer.print_scalar("cell_shape", element.cell_shape()); // space_dimension printer.print_scalar("space_dimension", element.space_dimension()); // value_rank printer.print_scalar("value_rank", element.value_rank()); // value_dimension for (std::size_t i = 0; i < element.value_rank(); i++) printer.print_scalar("value_dimension", element.value_dimension(i), i); // evaluate_basis for (std::size_t i = 0; i < element.space_dimension(); i++) { element.evaluate_basis(i, values, coordinates, vertex_coordinates.data(), 1); printer.print_array("evaluate_basis:", value_size, values, i); } // evaluate_basis all element.evaluate_basis_all(values, coordinates, vertex_coordinates.data(), 1); printer.print_array("evaluate_basis_all", element.space_dimension()*value_size, values); // evaluate_basis_derivatives for (std::size_t i = 0; i < element.space_dimension(); i++) { for (std::size_t n = 0; n <= max_derivative; n++) { std::size_t num_derivatives = 1; for (std::size_t j = 0; j < n; j++) num_derivatives *= c.geometric_dimension; element.evaluate_basis_derivatives(i, n, values, coordinates, vertex_coordinates.data(), 1); 
printer.print_array("evaluate_basis_derivatives", value_size*num_derivatives, values, i, n); } } // evaluate_basis_derivatives_all for (std::size_t n = 0; n <= max_derivative; n++) { std::size_t num_derivatives = 1; for (std::size_t j = 0; j < n; j++) num_derivatives *= c.geometric_dimension; element.evaluate_basis_derivatives_all(n, values, coordinates, vertex_coordinates.data(), 1); printer.print_array("evaluate_basis_derivatives_all", element.space_dimension()*value_size*num_derivatives, values, n); } // evaluate_dof for (std::size_t i = 0; i < element.space_dimension(); i++) { dof_values[i] = element.evaluate_dof(i, f, vertex_coordinates.data(), 1, c); printer.print_scalar("evaluate_dof", dof_values[i], i); } // evaluate_dofs element.evaluate_dofs(values, f, vertex_coordinates.data(), 1, c); printer.print_array("evaluate_dofs", element.space_dimension(), values); // interpolate_vertex_values element.interpolate_vertex_values(vertex_values, dof_values, vertex_coordinates.data(), 1, c); printer.print_array("interpolate_vertex_values", (c.topological_dimension + 1)*value_size, vertex_values); // num_sub_dof_elements printer.print_scalar("num_sub_elements", element.num_sub_elements()); // create_sub_element for (std::size_t i = 0; i < element.num_sub_elements(); i++) { ufc::finite_element* sub_element = element.create_sub_element(i); test_finite_element(*sub_element, i, printer); delete sub_element; } // Cleanup delete [] values; delete [] dof_values; delete [] vertex_values; delete [] coordinates; printer.end(); } // Function for testing ufc::element objects void test_dofmap(ufc::dofmap& dofmap, ufc::shape cell_shape, int id, Printer& printer) { printer.begin("dofmap", id); // Prepare arguments std::vector num_entities(4); num_entities[0] = 10001; num_entities[1] = 10002; num_entities[2] = 10003; num_entities[3] = 10004; test_cell c(cell_shape, dofmap.geometric_dimension()); const std::vector vertex_coordinates = 
test_vertex_coordinates(dofmap.geometric_dimension()); std::size_t n = dofmap.num_element_dofs(); std::size_t* dofs = new std::size_t[n]; for (std::size_t i = 0; i < n; i++) dofs[i] = 0; std::size_t num_facets = c.topological_dimension + 1; std::vector coordinates(n*c.geometric_dimension); // needs_mesh_entities for (std::size_t d = 0; d <= c.topological_dimension; d++) { printer.print_scalar("needs_mesh_entities", dofmap.needs_mesh_entities(d), d); } // global_dimension printer.print_scalar("global_dimension", dofmap.global_dimension(num_entities)); // num_element_dofs printer.print_scalar("num_element_dofs", dofmap.num_element_dofs()); // geometric_dimension printer.print_scalar("geometric_dimension", dofmap.geometric_dimension()); // num_facet_dofs printer.print_scalar("num_facet_dofs", dofmap.num_facet_dofs()); // num_entity_dofs for (std::size_t d = 0; d <= c.topological_dimension; d++) printer.print_scalar("num_entity_dofs", dofmap.num_entity_dofs(d), d); // tabulate_dofs dofmap.tabulate_dofs(dofs, num_entities, c); printer.print_array("tabulate_dofs", dofmap.num_element_dofs(), dofs); // tabulate_facet_dofs for (std::size_t facet = 0; facet < num_facets; facet++) { dofmap.tabulate_facet_dofs(dofs, facet); printer.print_array("tabulate_facet_dofs", dofmap.num_facet_dofs(), dofs, facet); } // tabulate_entity_dofs for (std::size_t d = 0; d <= c.topological_dimension; d++) { std::size_t num_entities[4][4] = {{0, 0, 0, 0}, // dummy entities in 0D {2, 1, 0, 0}, // interval {3, 3, 1, 0}, // triangle {4, 6, 4, 1}}; // tetrahedron for (std::size_t i = 0; i < num_entities[c.topological_dimension][d]; i++) { dofmap.tabulate_entity_dofs(dofs, d, i); printer.print_array("tabulate_entity_dofs", dofmap.num_entity_dofs(d), dofs, d, i); } } // tabulate_coordinates dofmap.tabulate_coordinates(coordinates.data(), vertex_coordinates.data()); printer.print_vector("tabulate_coordinates", coordinates); // num_sub_dofmaps printer.print_scalar("num_sub_dofmaps", 
dofmap.num_sub_dofmaps()); // create_sub_dofmap for (std::size_t i = 0; i < dofmap.num_sub_dofmaps(); i++) { ufc::dofmap* sub_dofmap = dofmap.create_sub_dofmap(i); test_dofmap(*sub_dofmap, cell_shape, i, printer); delete sub_dofmap; } // Cleanup delete [] dofs; printer.end(); } // Function for testing ufc::cell_integral objects void test_cell_integral(ufc::cell_integral& integral, ufc::shape cell_shape, std::size_t gdim, std::size_t tensor_size, double** w, bool bench, int id, Printer & printer) { printer.begin("cell_integral", id); // Prepare arguments test_cell c(cell_shape, gdim); const std::vector vertex_coordinates = test_vertex_coordinates(gdim); double* A = new double[tensor_size]; for(std::size_t i = 0; i < tensor_size; i++) A[i] = 0.0; // Call tabulate_tensor integral.tabulate_tensor(A, w, vertex_coordinates.data(), c.orientation); printer.print_array("tabulate_tensor", tensor_size, A); // Benchmark tabulate tensor if (bench) { printer.begin("timing"); for (std::size_t num_reps = initial_num_reps;; num_reps *= 2) { double t0 = time(); for (std::size_t i = 0; i < num_reps; i++) { integral.tabulate_tensor(A, w, vertex_coordinates.data(), c.orientation); } double dt = time() - t0; if (dt > minimum_timing) { dt /= static_cast(num_reps); printer.print_scalar("cell_integral_timing_iterations", num_reps); printer.print_scalar("cell_integral_time", dt); break; } } printer.end(); } // Cleanup delete [] A; printer.end(); } // Function for testing ufc::exterior_facet_integral objects void test_exterior_facet_integral(ufc::exterior_facet_integral& integral, ufc::shape cell_shape, std::size_t gdim, std::size_t tensor_size, double** w, bool bench, int id, Printer & printer) { printer.begin("exterior_facet_integral", id); // Prepare arguments test_cell c(cell_shape, gdim); const std::vector vertex_coordinates = test_vertex_coordinates(gdim); std::size_t num_facets = c.topological_dimension + 1; double* A = new double[tensor_size]; // Call tabulate_tensor for each facet 
for (std::size_t facet = 0; facet < num_facets; facet++) { for(std::size_t i = 0; i < tensor_size; i++) A[i] = 0.0; integral.tabulate_tensor(A, w, vertex_coordinates.data(), facet, c.orientation); printer.print_array("tabulate_tensor", tensor_size, A, facet); } // Benchmark tabulate tensor if (bench) { printer.begin("timing"); for (std::size_t num_reps = initial_num_reps;; num_reps *= 2) { double t0 = time(); for (std::size_t i = 0; i < num_reps; i++) integral.tabulate_tensor(A, w, vertex_coordinates.data(), 0, c.orientation); double dt = time() - t0; if (dt > minimum_timing) { dt /= static_cast(num_reps); printer.print_scalar("exterior_facet_integral_timing_iterations", num_reps); printer.print_scalar("exterior_facet_integral_time", dt); break; } } printer.end(); } // Cleanup delete [] A; printer.end(); } // Function for testing ufc::interior_facet_integral objects void test_interior_facet_integral(ufc::interior_facet_integral& integral, ufc::shape cell_shape, std::size_t gdim, std::size_t macro_tensor_size, double** w, bool bench, int id, Printer & printer) { printer.begin("interior_facet_integral", id); // Prepare arguments test_cell c0(cell_shape, gdim, 0); test_cell c1(cell_shape, gdim, 1); const std::vector vertex_coordinates0 = test_vertex_coordinates(gdim); const std::vector vertex_coordinates1 = test_vertex_coordinates(gdim); std::size_t num_facets = c0.topological_dimension + 1; double* A = new double[macro_tensor_size]; // Call tabulate_tensor for each facet-facet combination for (std::size_t facet0 = 0; facet0 < num_facets; facet0++) { for (std::size_t facet1 = 0; facet1 < num_facets; facet1++) { for(std::size_t i = 0; i < macro_tensor_size; i++) A[i] = 0.0; integral.tabulate_tensor(A, w, vertex_coordinates0.data(), vertex_coordinates1.data(), facet0, facet1, c0.orientation, c1.orientation); printer.print_array("tabulate_tensor", macro_tensor_size, A, facet0, facet1); } } // Benchmark tabulate tensor if (bench) { printer.begin("timing"); for 
(std::size_t num_reps = initial_num_reps;; num_reps *= 2) { double t0 = time(); for (std::size_t i = 0; i < num_reps; i++) { integral.tabulate_tensor(A, w, vertex_coordinates0.data(), vertex_coordinates1.data(), 0, 0, c0.orientation, c1.orientation); } double dt = time() - t0; if (dt > minimum_timing) { dt /= static_cast(num_reps); printer.print_scalar("interior_facet_integral_timing_iterations", num_reps); printer.print_scalar("interior_facet_integral_time", dt); break; } } printer.end(); } // Cleanup delete [] A; printer.end(); } // Function for testing ufc::vertex_integral objects void test_vertex_integral(ufc::vertex_integral& integral, ufc::shape cell_shape, std::size_t gdim, std::size_t tensor_size, double** w, bool bench, int id, Printer & printer) { printer.begin("vertex_integral", id); // Prepare arguments test_cell c(cell_shape, gdim); const std::vector vertex_coordinates = test_vertex_coordinates(gdim); std::size_t num_vertices = c.topological_dimension + 1; double* A = new double[tensor_size]; // Call tabulate_tensor for each vertex for (std::size_t vertex = 0; vertex < num_vertices; vertex++) { for(std::size_t i = 0; i < tensor_size; i++) A[i] = 0.0; integral.tabulate_tensor(A, w, vertex_coordinates.data(), vertex, c.orientation); printer.print_array("tabulate_tensor", tensor_size, A, vertex); } // Benchmark tabulate tensor if (bench) { printer.begin("timing"); for (std::size_t num_reps = initial_num_reps;; num_reps *= 2) { double t0 = time(); for (std::size_t i = 0; i < num_reps; i++) integral.tabulate_tensor(A, w, vertex_coordinates.data(), 0, c.orientation); double dt = time() - t0; if (dt > minimum_timing) { dt /= static_cast(num_reps); printer.print_scalar("vertex_integral_timing_iterations", num_reps); printer.print_scalar("vertex_integral_time", dt); break; } } printer.end(); } // Cleanup delete [] A; printer.end(); } // Function for testing ufc::form objects void test_form(ufc::form& form, bool bench, int id, Printer & printer) { 
printer.begin("form", id); // Compute size of tensors int tensor_size = 1; int macro_tensor_size = 1; for (std::size_t i = 0; i < form.rank(); i++) { ufc::finite_element* element = form.create_finite_element(i); tensor_size *= element->space_dimension(); macro_tensor_size *= 2*element->space_dimension(); // *2 for interior facet integrals delete element; } // Prepare dummy coefficients double** w = 0; if (form.num_coefficients() > 0) { w = new double * [form.num_coefficients()]; for (std::size_t i = 0; i < form.num_coefficients(); i++) { ufc::finite_element* element = form.create_finite_element(form.rank() + i); const std::size_t macro_dim = 2*element->space_dimension(); // *2 for interior facet integrals w[i] = new double[macro_dim]; for (std::size_t j = 0; j < macro_dim; j++) w[i][j] = 0.1*static_cast((i + 1)*(j + 1)); delete element; } } // Get cell shape ufc::finite_element* element = form.create_finite_element(0); ufc::shape cell_shape = element->cell_shape(); std::size_t gdim = element->geometric_dimension(); delete element; element = 0; // signature //printer.print_scalar("signature", form.signature()); // rank printer.print_scalar("rank", form.rank()); // num_coefficients printer.print_scalar("num_coefficients", form.num_coefficients()); // has_cell_integrals printer.print_scalar("has_cell_integrals", form.has_cell_integrals()); // has_exterior_facet_integrals printer.print_scalar("has_exterior_facet_integrals", form.has_exterior_facet_integrals()); // has_interior_facet_integrals printer.print_scalar("has_interior_facet_integrals", form.has_interior_facet_integrals()); // has_vertex_integrals printer.print_scalar("has_vertex_integrals", form.has_vertex_integrals()); // max_cell_subdomain_id printer.print_scalar("max_cell_subdomain_id", form.max_cell_subdomain_id()); // max_exterior_facet_subdomain_id printer.print_scalar("max_exterior_facet_subdomain_id", form.max_exterior_facet_subdomain_id()); // max_interior_facet_subdomain_id 
printer.print_scalar("max_interior_facet_subdomain_id", form.max_interior_facet_subdomain_id()); // max_vertex_subdomain_id printer.print_scalar("max_vertex_subdomain_id", form.max_vertex_subdomain_id()); // create_finite_element for (std::size_t i = 0; i < form.rank() + form.num_coefficients(); i++) { ufc::finite_element* element = form.create_finite_element(i); test_finite_element(*element, i, printer); delete element; } // create_dofmap for (std::size_t i = 0; i < form.rank() + form.num_coefficients(); i++) { ufc::dofmap* dofmap = form.create_dofmap(i); test_dofmap(*dofmap, cell_shape, i, printer); delete dofmap; } // create_cell_integral { ufc::cell_integral* integral = form.create_default_cell_integral(); printer.print_scalar("default_cell_integral", (bool)integral); if (integral) test_cell_integral(*integral, cell_shape, gdim, tensor_size, w, bench, -1, printer); delete integral; } for (std::size_t i = 0; i < form.max_cell_subdomain_id(); i++) { ufc::cell_integral* integral = form.create_cell_integral(i); if (integral) test_cell_integral(*integral, cell_shape, gdim, tensor_size, w, bench, i, printer); delete integral; } // create_exterior_facet_integral { ufc::exterior_facet_integral* integral = form.create_default_exterior_facet_integral(); printer.print_scalar("default_exterior_facet_integral", (bool)integral); if (integral) { test_exterior_facet_integral(*integral, cell_shape, gdim, tensor_size, w, bench, -1, printer); } delete integral; } for (std::size_t i = 0; i < form.max_exterior_facet_subdomain_id(); i++) { ufc::exterior_facet_integral* integral = form.create_exterior_facet_integral(i); if (integral) { test_exterior_facet_integral(*integral, cell_shape, gdim, tensor_size, w, bench, i, printer); } delete integral; } // create_interior_facet_integral { ufc::interior_facet_integral* integral = form.create_default_interior_facet_integral(); printer.print_scalar("default_interior_facet_integral", (bool)integral); if (integral) 
test_interior_facet_integral(*integral, cell_shape, gdim, macro_tensor_size, w, bench, -1, printer); delete integral; } for (std::size_t i = 0; i < form.max_interior_facet_subdomain_id(); i++) { ufc::interior_facet_integral* integral = form.create_interior_facet_integral(i); if (integral) { test_interior_facet_integral(*integral, cell_shape, gdim, macro_tensor_size, w, bench, i, printer); } delete integral; } // create_vertex_integral { ufc::vertex_integral* integral = form.create_default_vertex_integral(); printer.print_scalar("default_vertex_integral", (bool)integral); if (integral) { test_vertex_integral(*integral, cell_shape, gdim, tensor_size, w, bench, -1, printer); } delete integral; } for (std::size_t i = 0; i < form.max_vertex_subdomain_id(); i++) { ufc::vertex_integral* integral = form.create_vertex_integral(i); if (integral) { test_vertex_integral(*integral, cell_shape, gdim, tensor_size, w, bench, i, printer); } delete integral; } // Cleanup for (std::size_t i = 0; i < form.num_coefficients(); i++) delete [] w[i]; delete [] w; printer.end(); } ffc-1.6.0/test/regression/ufctest.py000066400000000000000000000042021255571034100174430ustar00rootroot00000000000000# Copyright (C) 2010-2013 Anders Logg, Kristian B. Oelgaard and Marie E. Rognes # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
# # Modified by Martin Alnaes, 2013-2015 _test_code = """\ #include "../../ufctest.h" #include "{prefix}.h" #include int main(int argc, char * argv[]) {{ const char jsonfilename[] = "{prefix}.json"; std::ofstream jsonfile(jsonfilename); Printer printer(jsonfile); printer.begin(); {benchline} {tests} printer.end(); return 0; }} """ def generate_test_code(header_file): "Generate test code for given header file." # Count the number of forms and elements prefix = header_file.split(".h")[0] generated_code = open(header_file).read() num_forms = generated_code.count("class %s_form_" % prefix.lower()) num_elements = generated_code.count("class %s_finite_element_" % prefix.lower()) # Generate tests, either based on forms or elements if num_forms > 0: benchline = " bool bench = (argc > 1) && argv[1][0] == 'b';\n" tests = [' {prefix}_form_{i} f{i}; test_form(f{i}, bench, {i}, printer);'.format(prefix=prefix.lower(), i=i) for i in range(num_forms)] else: benchline = "" tests = [' {prefix}_finite_element_{i} e{i}; test_finite_element(e{i}, {i}, printer);'.format(prefix=prefix.lower(), i=i) for i in range(num_elements)] # Write file test_file = open(prefix + ".cpp", "w") test_file.write(_test_code.format(prefix=prefix, benchline=benchline, tests="\n".join(tests))) test_file.close() ffc-1.6.0/test/test.py000066400000000000000000000027541255571034100145770ustar00rootroot00000000000000"""Run all tests, including unit tests and regression tests""" # Copyright (C) 2007 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2007-06-09 # Last changed: 2014-05-15 import os, re, sys # Name of log file pwd = os.path.dirname(os.path.abspath(__file__)) logfile = os.path.join(pwd, "test.log") os.system("rm -f %s" % logfile) # Tests to run tests = ["unit", "regression"] # Run tests failed = [] for test in tests: print("Running tests: %s" % test) print("----------------------------------------------------------------------") os.chdir(os.path.join(pwd, test)) #failure = os.system("python test.py | tee -a %s" % logfile) failure = os.system("python test.py") if failure: print("Test FAILED") failed.append(test) print("") print("To view the test log, use the following command: less -R test.log") sys.exit(len(failed)) ffc-1.6.0/test/unit/000077500000000000000000000000001255571034100142155ustar00rootroot00000000000000ffc-1.6.0/test/unit/elements/000077500000000000000000000000001255571034100160315ustar00rootroot00000000000000ffc-1.6.0/test/unit/elements/test.py000066400000000000000000000025101255571034100173600ustar00rootroot00000000000000"""Unit tests for FFC finite elements""" # Copyright (C) 2013 Marie E. Rognes # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
import unittest from ufl import interval from ufl import FiniteElement from ffc import compile_element class TestCompileElements(unittest.TestCase): def testRadau(self): "Test that Radau elements compile." for degree in range(3): element = FiniteElement("Radau", interval, degree) compile_element(element) def testLobatto(self): "Test that Lobatto elements compile." for degree in range(1, 4): element = FiniteElement("Lobatto", interval, degree) compile_element(element) if __name__ == "__main__": unittest.main() ffc-1.6.0/test/unit/evaluate_basis/000077500000000000000000000000001255571034100172045ustar00rootroot00000000000000ffc-1.6.0/test/unit/evaluate_basis/__init__.py000066400000000000000000000000001255571034100213030ustar00rootroot00000000000000ffc-1.6.0/test/unit/evaluate_basis/cppcode.py000066400000000000000000000115471255571034100212030ustar00rootroot00000000000000"This module provides simple C++ code for verification of UFC code." # Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
# # First added: 2010-01-18 # Last changed: 2010-01-18 #evaluate_basis_code = """\ ##include ##include ##include "test.h" #int main() #{ # // Create element # %(element)s element; # // Size of dof_values # // FIXME: This will not work for TensorElements # int N = element.value_dimension(0); # // Create values # double* dof_values = new double[N]; # for (unsigned int i = 0; i < N; i++) # dof_values[i] = 0.0; # // Create vertex coordinates and fill with some arbitrary data # double vertex_coordinates[24] = {0.90, 0.34, 0.45, # 0.56, 0.76, 0.83, # 0.98, 0.78, 0.19, # 0.12, 0.56, 0.66, # 0.96, 0.78, 0.63, # 0.11, 0.35, 0.49, # 0.51, 0.88, 0.65, # 0.98, 0.45, 0.01}; # // Random coordinates where we want to evaluate the basis functions # double coordinates[3] = {0.32, 0.51, 0.05}; # // Loop element space dimension and call evaluate_basis. # for (unsigned int i = 0; i < element.space_dimension(); i++) # { # element.evaluate_basis(i, dof_values, coordinates, vertex_coordinates, 0); # // Print values # for (unsigned int j = 0; j < N; j++) # std::cout << dof_values[j] << " "; # } # std::cout << std::endl; # return 0; #} #""" evaluate_basis_code_fiat = """\ #include #include #include #include "test.h" int main(int argc, char* argv[]) { // Create element %(element)s element; // Get derivative order unsigned int n = std::atoi(argv[1]); // Value dimension int N; if (element.value_rank() == 0) { N = 1; } else { N = 1; for (unsigned int i = 0; i < element.value_rank(); i++) { N = N * element.value_dimension(i); } } // Compute number of derivatives. 
unsigned int num_derivatives = 1; for (unsigned int r = 0; r < n; r++) { num_derivatives *= %(dim)d; } // Create values unsigned int num_dof_vals = N*num_derivatives; double* dof_values = new double[num_dof_vals]; for (unsigned int i = 0; i < num_dof_vals; i++) dof_values[i] = 0.0; %(cell_ref_coords)s double vertex_coordinates[%(num_coords)d*%(dim)d]; int k = 0; for (int i = 0; i < %(num_coords)d; i++) { for (int j = 0; j < %(dim)d; j++) vertex_coordinates[k++] = cell_ref_coords[i][j]; } // Random points where we want to evaluate the basis functions // coordinates of dofs and three arbitrary points on the reference cell. double points[%(num_points)d][%(dim)d] = %(points)s // Init array of coordinates double coordinates[3] = {0,0,0}; std::cout.precision(8); std::cout.setf(std::ios::fixed); // If we're testing evaluate_basis, loop all points. if (n == 0) { for (unsigned int p = 0; p < %(num_points)d; p++) { for (unsigned int d = 0; d < %(dim)d; d++) { coordinates[d] = points[p][d]; } // Loop element space dimension and call evaluate_basis. for (unsigned int i = 0; i < element.space_dimension(); i++) { element.evaluate_basis(i, dof_values, coordinates, vertex_coordinates, 0); // Print values for (unsigned int j = 0; j < num_dof_vals; j++) std::cout << dof_values[j] << " "; } std::cout << std::endl; } } else { // Else loop the arbitrary 3 points, otherwise the number of tests explode // with the element.space_dimension()^2. for (unsigned int p = element.space_dimension(); p < %(num_points)d; p++) { for (unsigned int d = 0; d < %(dim)d; d++) { coordinates[d] = points[p][d]; } // Loop element space dimension and call evaluate_basis. 
for (unsigned int i = 0; i < element.space_dimension(); i++) { element.evaluate_basis_derivatives(i, n, dof_values, coordinates, vertex_coordinates, 0); // Print values for (unsigned int j = 0; j < num_dof_vals; j++) std::cout << dof_values[j] << " "; } std::cout << std::endl; } } return 0; } """ ffc-1.6.0/test/unit/evaluate_basis/elements.py000066400000000000000000000111521255571034100213720ustar00rootroot00000000000000# Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
# # First added: 2014-01-28 # Last changed: 2014-01-28 from ufl import FiniteElement, MixedElement from .test_common import xcomb __all__ = ["single_elements", "mixed_elements"] # Elements, supported by FFC and FIAT, and their supported shape and orders single_elements = [ {"family": "Lagrange",\ "shapes": ["interval", "triangle", "tetrahedron"],\ "orders": [1, 2, 3, 4]},\ {"family": "Discontinuous Lagrange",\ "shapes": ["interval", "triangle", "tetrahedron"],\ "orders": [0, 1, 2, 3, 4]},\ {"family": "Crouzeix-Raviart",\ "shapes": ["triangle", "tetrahedron"],\ "orders": [1]},\ {"family": "Raviart-Thomas",\ "shapes": ["triangle", "tetrahedron"],\ "orders": [1, 2, 3]},\ {"family": "Discontinuous Raviart-Thomas",\ "shapes": ["triangle", "tetrahedron"],\ "orders": [1, 2, 3]},\ {"family": "Brezzi-Douglas-Marini",\ "shapes": ["triangle", "tetrahedron"],\ "orders": [1, 2, 3]},\ {"family": "Brezzi-Douglas-Fortin-Marini",\ "shapes": ["triangle"],\ "orders": [2]},\ {"family": "Nedelec 1st kind H(curl)",\ "shapes": ["triangle", "tetrahedron"],\ "orders": [1, 2, 3]},\ {"family": "Nedelec 2nd kind H(curl)",\ "shapes": ["triangle", "tetrahedron"],\ "orders": [1, 2, 3]},\ {"family": "Regge",\ "shapes": ["triangle", "tetrahedron"],\ "orders": [0, 1, 2, 3]}] # Create some mixed elements dg0_tri = FiniteElement("DG", "triangle", 0) dg1_tri = FiniteElement("DG", "triangle", 1) cg1_tri = FiniteElement("CG", "triangle", 1) cr1_tri = FiniteElement("CR", "triangle", 1) rt1_tri = FiniteElement("RT", "triangle", 1) drt2_tri = FiniteElement("DRT", "triangle", 2) bdm1_tri = FiniteElement("BDM", "triangle", 1) ned1_tri = FiniteElement("N1curl", "triangle", 1) reg0_tri = FiniteElement("Regge", "triangle", 0) dg0_tet = FiniteElement("DG", "tetrahedron", 0) dg1_tet = FiniteElement("DG", "tetrahedron", 1) cg1_tet = FiniteElement("CG", "tetrahedron", 1) cr1_tet = FiniteElement("CR", "tetrahedron", 1) rt1_tet = FiniteElement("RT", "tetrahedron", 1) drt2_tet = FiniteElement("DRT", "tetrahedron", 2) 
bdm1_tet = FiniteElement("BDM", "tetrahedron", 1) ned1_tet = FiniteElement("N1curl", "tetrahedron", 1) reg0_tet = FiniteElement("Regge", "tetrahedron", 0) # Create combinations in pairs. mix_tri = [MixedElement(e) for e in xcomb([dg0_tri, dg1_tri, cg1_tri, cr1_tri, rt1_tri, drt2_tri, bdm1_tri, ned1_tri, reg0_tri], 2)] mix_tet = [MixedElement(e) for e in xcomb([dg0_tet, dg1_tet, cg1_tet, cr1_tet, rt1_tet, drt2_tet, bdm1_tet, ned1_tet, reg0_tet], 2)] mixed_elements = [MixedElement([dg0_tri]*4), MixedElement([cg1_tri]*3), MixedElement([bdm1_tri]*2),\ MixedElement([dg1_tri, cg1_tri, cr1_tri, rt1_tri, bdm1_tri, ned1_tri]),\ MixedElement([MixedElement([rt1_tri, cr1_tri]), cg1_tri, ned1_tri]),\ MixedElement([ned1_tri, dg1_tri, MixedElement([rt1_tri, cr1_tri])]),\ MixedElement([drt2_tri, cg1_tri]),\ MixedElement([dg0_tet]*4), MixedElement([cg1_tet]*3), MixedElement([bdm1_tet]*2),\ MixedElement([dg1_tet, cg1_tet, cr1_tet, rt1_tet, bdm1_tet, ned1_tet]),\ MixedElement([MixedElement([rt1_tet, cr1_tet]), cg1_tet, ned1_tet]),\ MixedElement([ned1_tet, dg1_tet, MixedElement([rt1_tet, cr1_tet])]),\ MixedElement([drt2_tet, cg1_tet]),\ MixedElement([cg1_tet, cg1_tet, cg1_tet, reg0_tet])] + mix_tri + mix_tet ffc-1.6.0/test/unit/evaluate_basis/test.py000066400000000000000000000026501255571034100205400ustar00rootroot00000000000000"""Unit tests for FFC. This test compares values computed by the two UFC functions evaluate_basis and evaluate_basis_derivatives generated by FFC to the values tabulated by FIAT and to reference values computed by an older version of FFC.""" # Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2010-02-01 # Last changed: 2010-02-01 import unittest from .test_against_fiat import main as fiat_main from ffc.log import CRITICAL, INFO class EvaluateBasisTests(unittest.TestCase): def testAgainstFiat(self): "Test evaluate basis against FIAT.FiniteElement.tabulate()." error = fiat_main(INFO) self.assertEqual(error, 0, "Errors while testing evaluate_basis against FIAT, see fiat_errors.log for details") if __name__ == "__main__": unittest.main() ffc-1.6.0/test/unit/evaluate_basis/test_against_fiat.py000066400000000000000000000225601255571034100232530ustar00rootroot00000000000000# Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
# # First added: 2010-01-29 # Last changed: 2013-01-31 from .cppcode import evaluate_basis_code_fiat from ufl import FiniteElement from ffc.fiatinterface import create_element, reference_cell from ffc.mixedelement import MixedElement as FFCMixedElement from ffc.log import info, error, debug import numpy import sys, os, numpy, shutil from .test_common import compile_element, print_results, compile_gcc_code,\ run_code, get_element_name, verify_values import time from ffc.log import push_level, pop_level, CRITICAL, INFO from .elements import single_elements, mixed_elements # Some random points random_points = {1: [(0.114,), (0.349,), (0.986,)], 2: [(0.114, 0.854), (0.349, 0.247), (0.986, 0.045)], 3: [(0.114, 0.854, 0.126), (0.349, 0.247, 0.457), (0.986, 0.045, 0.127)]} ffc_fail = [] gcc_fail = [] run_fail = [] dif_cri = [] dif_acc = [] correct = [] log_file = "fiat_errors.log" def matrix(points): return "{%s};" % ", ".join(["{%s}" % ", ".join([str(c) for c in p]) for p in points]) def get_data(ufl_element): "Get needed data to run tests." # Create fiat element. element = create_element(ufl_element) # The derivative order that we are interested in is the degree of the element. if isinstance(element, FFCMixedElement): deriv_order = max([e.degree() for e in element.elements()]) else: deriv_order = element.degree() # Get coordinates of the reference cell. domain, = ufl_element.domains() ref_coords = reference_cell(domain.cell().cellname()).get_vertices() # Get the locations of the fiat element dofs. elem_points = [list(L.pt_dict.keys())[0] for L in element.dual_basis()] # Add some random points. geo_dim = domain.geometric_dimension() points = elem_points + random_points[geo_dim] return (element, points, geo_dim, ref_coords, deriv_order) def compute_derivative_combinations(deriv_order, geo_dim): "Compute combinations of derivatives in spatial directions (like code snippet)." 
if deriv_order == 0: return [(0,)*geo_dim] num_derivatives = geo_dim**deriv_order combinations = [[0]*deriv_order for n in range(num_derivatives)] for i in range(1, num_derivatives): for k in range(i): j = deriv_order - 1 while j + 1 > 0: j -= 1 if combinations[i][j] + 1 > geo_dim - 1: combinations[i][j] = 0 else: combinations[i][j] += 1 break # Convert to fiat tuples. for i in range(num_derivatives): combinations[i] = to_fiat_tuple(combinations[i], geo_dim) return combinations def to_fiat_tuple(comb, geo_dim): """Convert a list of combinations of derivatives to a fiat tuple of derivatives. FIAT expects a list with the number of derivatives in each spatial direction. E.g., in 2D: u_{xyy} --> [0, 1, 1] in FFC --> (1, 2) in FIAT.""" new_comb = [0]*geo_dim if comb == []: return tuple(new_comb) for i in range(geo_dim): new_comb[i] = comb.count(i) return tuple(new_comb) def get_fiat_values(ufl_element): """Create a FIAT element and use it to tabulate the values on the reference element. The return values is a dictionary with keys equal to the derivative order and values is a matrix where each row is the basis values at a point. E.g., {0:[[1,0,0],[0,1,0], [0,0,1]]}.""" # Get data and tabulate basis values. element, points, geo_dim, ref_coords, deriv_order = get_data(ufl_element) values = element.tabulate(deriv_order, points) return_values = {} value_shape = element.value_shape() # Rearrange values to match what we get from evaluate_basis*() for n in range(deriv_order + 1): combinations = compute_derivative_combinations(n, geo_dim) vals = [] # If we're evaluating the basis functions, use all points, but if we're # evaluating the derivatives, just use the 3 arbitrary points to avoid # the number of tests exploding with spacedim**2. 
if n == 0: new_points = points else: new_points = points[-3:] for p, point in enumerate(new_points): if n != 0: p += element.space_dimension() row = [[] for i in range(element.space_dimension())] for i in range(element.space_dimension()): if value_shape == (): for deriv in combinations: deriv_vals = values[deriv] row[i].append(deriv_vals[i][p]) elif len(value_shape) == 1: for c in range(element.value_shape()[0]): for deriv in combinations: deriv_vals = values[deriv] row[i].append(deriv_vals[i][c][p]) elif len(value_shape) == 2: for j in range(element.value_shape()[0]): for k in range(element.value_shape()[1]): for deriv in combinations: deriv_vals = values[deriv] row[i].append(deriv_vals[i][j][k][p]) else: print(values) error("Did not expect tensor elements of rank > 2") new_row = [] for r in row: new_row += r vals.append(new_row) return_values[n] = numpy.array(vals) return return_values def get_ffc_values(ufl_element): "Get the values from evaluate_basis and evaluate_basis_derivatives." # Get data and tabulate basis values. element, points, geo_dim, ref_coords, deriv_order = get_data(ufl_element) # Get relevant element name. element_name = get_element_name(ufl_element) # Create g++ code and compile. num_coords = len(ref_coords) options = {"element": element_name, "dim": geo_dim, "num_points": len(points), "points": matrix(points), "cell_ref_coords": "double cell_ref_coords[%d][%d] = %s" % (num_coords, geo_dim, matrix(ref_coords)), "num_coords": num_coords} error = compile_gcc_code(ufl_element, evaluate_basis_code_fiat % options, gcc_fail, log_file) if error: return None # Loop derivative order and compute values. 
def verify_element(num_elements, i, ufl_element):
    # Verify a single element: compile it with FFC, tabulate reference
    # values with FIAT and computed values via the generated
    # evaluate_basis*() code, then compare the two tables.
    # Returns the number of comparisons performed on success, or 1 on
    # failure so the failed attempt is still counted by the caller.
    info("\nVerifying element %d of %d: %s" % (i, num_elements, str(ufl_element)))
    error = compile_element(ufl_element, ffc_fail, log_file)

    # Return if test failed
    if error:
        return 1

    # Get FIAT values that are formatted in the same way as the values from
    # evaluate_basis and evaluate_basis_derivatives.
    # t = time.time()
    fiat_values = get_fiat_values(ufl_element)
    # print "fiat_vals: ", time.time() - t

    # Get FFC values.
    t = time.time()
    ffc_values = get_ffc_values(ufl_element)
    if ffc_values is None:
        return 1
    debug(" time to compute FFC values: %f" % (time.time() - t))

    # Compare values and return number of tests.
    return verify_values(ufl_element, fiat_values, ffc_values, dif_cri, dif_acc, correct, log_file)
elements = [] for element in single_elements: for shape in element["shapes"]: for order in element["orders"]: elements.append(FiniteElement(element["family"], shape, order)) # Add the mixed elements elements += mixed_elements num_elements = len(elements) # Test all elements num_tests = 0 msg = "Verifying evaluate_basis and evaluate_basis_derivatives for elements" info("\n" + msg + "\n" + len(msg)*"-") for i, ufl_element in enumerate(elements): num_tests += verify_element(num_elements, i + 1, ufl_element) # print results error = print_results(num_tests, ffc_fail, gcc_fail, run_fail, dif_cri, dif_acc, correct) if not error: # Remove temporary directory os.chdir(os.pardir) shutil.rmtree("tmp") pop_level() return error if __name__ == "__main__": # sys.exit(main(sys.argv[1:])) sys.exit(main(INFO)) ffc-1.6.0/test/unit/evaluate_basis/test_against_ref_values.py000066400000000000000000000174431255571034100244670ustar00rootroot00000000000000# Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
# # First added: 2010-01-18 # Last changed: 2010-01-29 from __future__ import print_function from .cppcode import evaluate_basis_code from ufl import FiniteElement, MixedElement from instant.output import get_status_output import sys, os, pickle, numpy, shutil from .elements import single_elements, mixed_elements ffc_failed = [] gcc_failed = [] run_failed = [] def check_results(values, reference): "Check results and print summary." # Check if we have missing values. missing_vals = [] num_refs = len(list(reference.keys())) for element in list(reference.keys()): if not element in values: missing_vals.append(element) missing_refs = [] diffs = [] correct = [] num_ok = 0 print("") sorted_elements = sorted(values.keys()) for element in sorted_elements: vals = values[element] print("\nResults for %s:" % element) if vals is None: print("Error") continue # Get reference values if not element in reference: missing_refs.append(element) print("Missing reference") continue refs = reference[element] tol = 1e-12 e = max(abs(vals - refs)) if e < tol: num_ok += 1 print("OK: (diff = %g)" % e) correct.append(element) else: print("*** (diff = %g)" % e) diffs.append(element) if ffc_failed == gcc_failed == run_failed == missing_refs == diffs == missing_vals: print("\nAll %d elements verified OK" % len(reference)) return 0 else: print("\n*** The values were correct for the following elements:\n" + "\n\n".join(correct)) if len(ffc_failed) > 0: print("\n*** FFC compilation failed for the following elements:\n" + "\n\n".join(ffc_failed)) if len(gcc_failed) > 0: print("\n*** g++ compilation failed for the following elements:\n" + "\n\n".join(gcc_failed)) if len(run_failed) > 0: print("\n*** Evaluation failed (seg. fault?) 
def get_element_name(ufl_element):
    """Extract the generated finite_element class name from 'test.h'.

    Scans the FFC-generated header, remembering the most recent
    'class ... finite_element ...' declaration, and stops at the first
    line containing the element's UFL signature (its repr).  Returns the
    class name with the trailing ':' stripped.

    Raises RuntimeError if no finite element class declaration is found.
    """
    # Context manager ensures the header file is closed even on error.
    with open("test.h") as f:
        lines = f.readlines()
    signature = repr(ufl_element)
    name = None
    for l in lines:
        # Remember the last class declaration seen so far ...
        if "class" in l and "finite_element" in l:
            name = l
        # ... and stop once the line containing the signature is reached.
        if signature in l:
            break
    if name is None:
        raise RuntimeError("No finite element class found")
    # "class foo_finite_element_0: ..." -> "foo_finite_element_0"
    return name.split()[1][:-1]
def print_refs():
    """Print all stored reference values from 'reference.pickle'.

    Raises RuntimeError if the reference file does not exist.
    """
    if os.path.isfile("reference.pickle"):
        # Pickle data is binary: the file must be opened in "rb" mode
        # (text mode "r" breaks under Python 3).  The context manager
        # also guarantees the handle is closed.
        with open("reference.pickle", "rb") as f:
            reference = pickle.load(f)
        for elem, vals in list(reference.items()):
            print()
            print(elem)
            print(vals)
    else:
        raise RuntimeError("No references to print")
if "refs" in args: print_refs() return 0 # Change to temporary folder and copy form files if not os.path.isdir("tmp"): os.mkdir("tmp") os.chdir("tmp") values = {} # Evaluate basis for single elements print("\nComputing evaluate_basis for single elements") for element in single_elements: for shape in element["shapes"]: for order in element["orders"]: ufl_element = FiniteElement(element["family"], shape, order) print("Compiling element: ", str(ufl_element)) error = compile_element(ufl_element) if error: values[repr(ufl_element)] = None continue print("Computing values") values[repr(ufl_element)] = compute_values(ufl_element) # Evaluate basis for mixed elements print("\nComputing evaluate_basis for mixed elements") for ufl_element in mixed_elements: print("Compiling element: ", str(ufl_element)) error = compile_element(ufl_element) if error: values[repr(ufl_element)] = None continue print("Computing values") values[repr(ufl_element)] = compute_values(ufl_element) # Load or update reference values os.chdir(os.pardir) if os.path.isfile("reference.pickle"): reference = pickle.load(open("reference.pickle", "r")) else: print("Unable to find reference values, storing current values.") pickle.dump(values, open("reference.pickle", "w")) return 0 # Check results error = check_results(values, reference) if not error: # Remove temporary directory shutil.rmtree("tmp") return error if __name__ == "__main__": sys.exit(main(sys.argv[1:])) ffc-1.6.0/test/unit/evaluate_basis/test_common.py000066400000000000000000000154461255571034100221170ustar00rootroot00000000000000# Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
def xcomb(items, n):
    "Create n-tuples with combinations of items."
    # Length-zero selection: yield exactly one empty list.
    if n == 0:
        yield []
        return
    # Pick each element in turn as the head and recurse on the rest,
    # preserving the original left-to-right ordering.
    for idx, head in enumerate(items):
        remaining = items[:idx] + items[idx + 1:]
        for tail in xcomb(remaining, n - 1):
            yield [head] + tail
def compile_element(ufl_element, ffc_fail, log_file):
    """Write a UFL file containing the given element and compile it with FFC.

    On failure the element is recorded in ffc_fail and the compiler
    output is appended to log_file.  Returns the exit status of the FFC
    invocation (nonzero on failure).
    """
    # Context manager guarantees the file is flushed and closed before
    # the external FFC process reads it.
    with open("test.ufl", "w") as f:
        f.write("element = " + repr(ufl_element))
    error, output = get_status_output("ffc test.ufl")
    if error:
        info_red("FFC compilation failed.")
        log_error("element: %s,\n%s\n" % (str(ufl_element), output), log_file)
        ffc_fail.append(str(ufl_element))
    return error
def compile_gcc_code(ufl_element, code, gcc_fail, log_file):
    """Write generated C++ code and compile it with g++ against UFC.

    The exact compile command is also saved to 'compile.sh' so it can be
    rerun by hand when debugging.  On failure the element is recorded in
    gcc_fail and the compiler output is appended to log_file; with
    -f/--failfast on the command line the whole process exits.
    Returns the g++ exit status (nonzero on failure).
    """
    # Write code (context managers close the handles promptly).
    with open("evaluate_basis.cpp", "w") as f:
        f.write(code)

    # Get UFC flags
    ufc_cflags = get_status_output("pkg-config --cflags ufc-1")[1].strip()

    # Compile g++ code, keeping the command around for manual reruns.
    c = "g++ %s -Wall -Werror -o evaluate_basis evaluate_basis.cpp" % ufc_cflags
    with open("compile.sh", "w") as f:
        f.write(c + "\n")

    error, output = get_status_output(c)
    if error:
        info_red("GCC compilation failed.")
        log_error("element: %s,\n%s\n" % (str(ufl_element), output), log_file)
        gcc_fail.append(str(ufl_element))
    if error and ("-f" in sys.argv or "--failfast" in sys.argv):
        print("FAIL")
        exit(1)
    return error
num_tests = len(ffc_values) if num_tests != len(ref_values): raise RuntimeError("The number of computed values is not equal to the number of reference values.") errors = [str(ufl_element)] for deriv_order in range(num_tests): s = "" if deriv_order == 0: s = " evaluate_basis" else: s = " evaluate_basis_derivatives, order = %d" % deriv_order e = abs(ffc_values[deriv_order] - ref_values[deriv_order]) error = e.max() if error > tol: if error > crit_tol: m = "%s failed: error = %s (crit_tol: %s)" % (s, str(error), str(crit_tol)) info_red(m) dif_cri.append(str(ufl_element)) s = s + "\n" + m else: m = "%s ok: error = %s (tol: %s)" % (s, str(error), str(tol)) info_blue(m) dif_acc.append(str(ufl_element)) s = s + "\n" + m errors.append(s) else: info_green("%s OK" % s) correct.append(str(ufl_element)) # Log errors if any if len(errors) > 1: log_error("\n".join(errors), log_file) return num_tests ffc-1.6.0/test/unit/misc/000077500000000000000000000000001255571034100151505ustar00rootroot00000000000000ffc-1.6.0/test/unit/misc/__init__.py000066400000000000000000000000001255571034100172470ustar00rootroot00000000000000ffc-1.6.0/test/unit/misc/test.py000066400000000000000000000340411255571034100165030ustar00rootroot00000000000000"Unit tests for FFC" # Copyright (C) 2007-2009 Anders Logg # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # Modified by Marie E. 
def random_point(shape):
    """Return a random convex combination of the given simplex vertices."""
    weights = numpy.random.random(len(shape))
    # Weight each vertex, then normalize by the total weight so the
    # result lies inside the simplex spanned by the vertices.
    weighted = [numpy.array(vertex)*weight for vertex, weight in zip(shape, weights)]
    return sum(weighted) / sum(weights)
    def _check_function_values(self, points, element, reference):
        # Compare tabulated basis function values against reference
        # lambdas at each point.  `reference` holds one callable per
        # basis function, returning either a scalar or a value-shaped
        # tuple matching element.value_shape().
        for x in points:
            # Tabulate order-0 (function values only) at the single
            # point x; the resulting table has one key (the
            # zero-derivative multi-index).
            table = element.tabulate(0, (x,))
            basis = table[list(table.keys())[0]]
            for i in range(len(basis)):
                if element.value_shape() == ():
                    # Scalar element: basis[i] is the value at the point.
                    self.assertAlmostEqual(float(basis[i]), reference[i](x))
                else:
                    # Vector element: compare each component; the
                    # trailing [0] selects the single tabulation point.
                    for k in range(element.value_shape()[0]):
                        self.assertAlmostEqual(basis[i][k][0], reference[i](x)[k])
element = create(FiniteElement("DG", "triangle", 1)) reference = [lambda x: 1 - x[0] - x[1], lambda x: x[0], lambda x: x[1]] points = [random_point(triangle) for i in range(num_points)] self._check_function_values(points, element, reference) def testDiscontinuousLagrange3D(self): "Test values of discontinuous Lagrange functions in 3D." element = create(FiniteElement("DG", "tetrahedron", 1)) reference = [lambda x: 1 - x[0] - x[1] - x[2], lambda x: x[0], lambda x: x[1], lambda x: x[2]] points = [random_point(tetrahedron) for i in range(num_points)] self._check_function_values(points, element, reference) def testBDM1_2D(self): "Test values of BDM1." element = create(FiniteElement("Brezzi-Douglas-Marini", "triangle", 1)) reference = [lambda x: (2*x[0], -x[1]), lambda x: (-x[0], 2*x[1]), lambda x: (2 - 2*x[0] - 3*x[1], x[1]), lambda x: (- 1 + x[0] + 3*x[1], - 2*x[1]), lambda x: (-x[0], -2 + 3*x[0] + 2*x[1]), lambda x: (2*x[0], 1 - 3*x[0] - x[1])] points = [random_point(triangle) for i in range(num_points)] self._check_function_values(points, element, reference) def testRT1_2D(self): "Test values of RT1." element = create(FiniteElement("Raviart-Thomas", "triangle", 1)) reference = [lambda x: (x[0], x[1]), lambda x: (1 - x[0], -x[1]), lambda x: (x[0], x[1] - 1)] points = [random_point(triangle) for i in range(num_points)] self._check_function_values(points, element, reference) def testRT2_2D(self): "Test values of RT2." 
element = create(FiniteElement("Raviart-Thomas", "triangle", 2)) reference = [ lambda x: (-x[0] + 3*x[0]**2, -x[1] + 3*x[0]*x[1]), lambda x: (-x[0] + 3*x[0]*x[1], -x[1] + 3*x[1]**2), lambda x: ( 2 - 5*x[0] - 3*x[1] + 3*x[0]*x[1] + 3*x[0]**2, -2*x[1] + 3*x[0]*x[1] + 3*x[1]**2), lambda x: (-1.0 + x[0] + 3*x[1] - 3*x[0]*x[1], x[1] - 3*x[1]**2), lambda x: (2*x[0] - 3*x[0]*x[1] - 3*x[0]**2, -2 + 3*x[0]+ 5*x[1] - 3*x[0]*x[1] - 3*x[1]**2), lambda x: (- x[0] + 3*x[0]**2, + 1 - 3*x[0] - x[1] + 3*x[0]*x[1]), lambda x: (6*x[0] - 3*x[0]*x[1] - 6*x[0]**2, 3*x[1] - 6*x[0]*x[1] - 3*x[1]**2), lambda x: (3*x[0] - 6*x[0]*x[1] - 3*x[0]**2, 6*x[1]- 3*x[0]*x[1] - 6*x[1]**2), ] def testDRT1_2D(self): "Test values of DRT1." element = create(FiniteElement("Discontinuous Raviart-Thomas", "triangle", 1)) reference = [lambda x: (x[0], x[1]), lambda x: (1 - x[0], -x[1]), lambda x: (x[0], x[1] - 1)] points = [random_point(triangle) for i in range(num_points)] self._check_function_values(points, element, reference) def testDRT2_2D(self): "Test values of DRT2." element = create(FiniteElement("Discontinuous Raviart-Thomas", "triangle", 2)) reference = [ lambda x: (-x[0] + 3*x[0]**2, -x[1] + 3*x[0]*x[1]), lambda x: (-x[0] + 3*x[0]*x[1], -x[1] + 3*x[1]**2), lambda x: ( 2 - 5*x[0] - 3*x[1] + 3*x[0]*x[1] + 3*x[0]**2, -2*x[1] + 3*x[0]*x[1] + 3*x[1]**2), lambda x: (-1.0 + x[0] + 3*x[1] - 3*x[0]*x[1], x[1] - 3*x[1]**2), lambda x: (2*x[0] - 3*x[0]*x[1] - 3*x[0]**2, -2 + 3*x[0]+ 5*x[1] - 3*x[0]*x[1] - 3*x[1]**2), lambda x: (- x[0] + 3*x[0]**2, + 1 - 3*x[0] - x[1] + 3*x[0]*x[1]), lambda x: (6*x[0] - 3*x[0]*x[1] - 6*x[0]**2, 3*x[1] - 6*x[0]*x[1] - 3*x[1]**2), lambda x: (3*x[0] - 6*x[0]*x[1] - 3*x[0]**2, 6*x[1]- 3*x[0]*x[1] - 6*x[1]**2), ] points = [random_point(triangle) for i in range(num_points)] self._check_function_values(points, element, reference) def testNED1_2D(self): "Test values of NED1." 
element = create(FiniteElement("N1curl", "triangle", 1)) reference = [ lambda x: (-x[1], x[0]), lambda x: ( x[1], 1 - x[0]), lambda x: ( 1 - x[1], x[0]), ] points = [random_point(triangle) for i in range(num_points)] self._check_function_values(points, element, reference) def testRT1_3D(self): element = create(FiniteElement("RT", "tetrahedron", 1)) reference = [lambda x: (-x[0], -x[1], -x[2]), lambda x: (-1.0 + x[0], x[1], x[2]), lambda x: (-x[0], 1.0 - x[1], -x[2]), lambda x: ( x[0], x[1], -1.0 + x[2]) ] points = [random_point(tetrahedron) for i in range(num_points)] self._check_function_values(points, element, reference) def testDRT1_3D(self): element = create(FiniteElement("DRT", "tetrahedron", 1)) reference = [lambda x: (-x[0], -x[1], -x[2]), lambda x: (-1.0 + x[0], x[1], x[2]), lambda x: (-x[0], 1.0 - x[1], -x[2]), lambda x: ( x[0], x[1], -1.0 + x[2]) ] points = [random_point(tetrahedron) for i in range(num_points)] self._check_function_values(points, element, reference) def testBDM1_3D(self): element = create(FiniteElement("BDM", "tetrahedron", 1)) reference = [ lambda x: (-3*x[0], x[1], x[2]), lambda x: (x[0], -3*x[1], x[2]), lambda x: (x[0], x[1], -3*x[2]), lambda x: (-3.0 + 3*x[0] + 4*x[1] + 4*x[2], -x[1], -x[2]), lambda x: (1.0 - x[0] - 4*x[1], 3*x[1], -x[2]), lambda x: (1.0 - x[0] - 4*x[2], -x[1], 3*x[2]), lambda x: (x[0], 3.0 - 4*x[0] - 3*x[1] - 4*x[2], x[2]), lambda x: (-3*x[0], -1.0 + 4*x[0] + x[1], x[2]), lambda x: (x[0], -1.0 + x[1] + 4*x[2], -3*x[2]), lambda x: (-x[0], -x[1], -3.0 + 4*x[0] + 4*x[1] + 3*x[2]), lambda x: (3*x[0], -x[1], 1.0 - 4*x[0] - x[2]), lambda x: (-x[0], 3*x[1], 1.0 - 4*x[1] - x[2]) ] points = [random_point(tetrahedron) for i in range(num_points)] self._check_function_values(points, element, reference) def testNED1_3D(self): element = create(FiniteElement("N1curl", "tetrahedron", 1)) reference = [ lambda x: (0.0, -x[2], x[1]), lambda x: (-x[2], 0.0, x[0]), lambda x: (-x[1], x[0], 0.0), lambda x: ( x[2], x[2], 1.0 - x[0] - x[1]), 
class JITTests(unittest.TestCase):
    # Smoke/performance test for the FFC just-in-time compiler caches.
    # NOTE(review): the assertions below compare wall-clock timings
    # against hard-coded thresholds, so this test is inherently
    # machine-dependent and may be flaky on slow or loaded hosts.

    def testPoisson(self):
        "Test that JIT compiler is fast enough."

        # FIXME: Use local cache: cache_dir argument to instant.build_module
        #options = {"log_level": INFO + 5}
        #options = {"log_level": 5}
        options = {"log_level": WARNING}

        # Define two forms with the same signatures
        element = FiniteElement("Lagrange", "triangle", 1)
        v = TestFunction(element)
        u = TrialFunction(element)
        f = Coefficient(element)
        g = Coefficient(element)
        a0 = f*dot(grad(v), grad(u))*dx
        a1 = g*dot(grad(v), grad(u))*dx

        # Strange this needs to be done twice

        # Compile a0 so it will be in the cache (both in-memory and disk)
        jit(a0, options)
        jit(a0, options)

        # Compile a0 again (should be really fast, using in-memory cache)
        t = time()
        jit(a0, options)
        dt0 = time() - t
        print("")

        # Compile a1 (should be fairly fast, using disk cache)
        t = time()
        jit(a1, options)
        dt1 = time() - t

        # Good values
        dt0_good = 0.005
        dt1_good = 0.01

        print("")
        print("JIT in-memory cache:", dt0)
        print("JIT disk cache: ", dt1)
        print("Reasonable values are %g and %g" % (dt0_good, dt1_good))

        # Check times: allow a 10x margin over the "good" values to
        # reduce (but not eliminate) timing-related flakiness.
        self.assertTrue(dt0 < 10*dt0_good)
        self.assertTrue(dt1 < 10*dt1_good)
# # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2009-07-11 # Last changed: 2010-02-01 import unittest # FFC modules from ffc.quadrature.symbolics import * # Import tests from .testfloat import TestFloat from .testsymbol import TestSymbol from .testproduct import TestProduct from .testsum import TestSum from .testfraction import TestFraction from .testfloatoperators import TestFloatOperators from .testsymboloperators import TestSymbolOperators from .testproductoperators import TestProductOperators from .testsumoperators import TestSumOperators from .testfractionoperators import TestFractionOperators from .testmixedsymbols import TestMixedSymbols from .testexpandoperations import TestExpandOperations from .testreducevartype import TestReduceVarType from .testreduceoperations import TestReduceOperations from .testnotfinished import TestNotFinished from .testdgelastodyn import TestDGElastoDyn from .testreducegip import TestReduceGIP from .testpoisson import TestPoisson from .testelasticity2d import TestElasticity2D from .testelasticityterm import TestElasticityTerm from .testelasweighted import TestElasWeighted from .testelasweighted2 import TestElasWeighted2 from .testrealexamples import TestRealExamples class TestSingle(unittest.TestCase): def testSingle(self): "Run a single test." 
def suite():
    "Assemble the full symbolics test suite in a fixed, documented order."
    cases = [
        # Classes and member functions
        (TestFloat, 'testFloat'),
        (TestSymbol, 'testSymbol'),
        (TestProduct, 'testProduct'),
        (TestSum, 'testSum'),
        (TestFraction, 'testFraction'),
        (TestFloatOperators, 'testFloatOperators'),
        (TestSymbolOperators, 'testSymbolOperators'),
        (TestProductOperators, 'testProductOperators'),
        (TestSumOperators, 'testSumOperators'),
        (TestFractionOperators, 'testFractionOperators'),
        (TestMixedSymbols, 'testMixedSymbols'),
        (TestExpandOperations, 'testExpandOperations'),
        (TestReduceVarType, 'testReduceVarType'),
        (TestReduceOperations, 'testReduceOperations'),
        # Misc.
        (TestNotFinished, 'testNotFinished'),
        # 'Real' expressions (expand and reduce)
        (TestDGElastoDyn, 'testDGElastoDyn'),
        (TestReduceGIP, 'testReduceGIP'),
        (TestPoisson, 'testPoisson'),
        (TestElasticity2D, 'testElasticity2D'),
        # 'Real' expressions (generate code)
        (TestElasticityTerm, 'testElasticityTerm'),
        (TestElasWeighted, 'testElasWeighted'),
        (TestElasWeighted2, 'testElasWeighted2'),
        # Various bug encounters
        (TestRealExamples, 'testRealExamples'),
    ]
    s = unittest.TestSuite()
    for test_class, method_name in cases:
        s.addTest(test_class(method_name))
    return s
# # First added: 2010-01-06 # Last changed: 2010-02-01 # Pyhton modules import unittest import time # FFC modules from ffc.quadrature.symbolics import * from ffc.cpp import format, set_float_formatting from ffc.parameters import FFC_PARAMETERS set_float_formatting(FFC_PARAMETERS['precision']) class TestDGElastoDyn(unittest.TestCase): def testDGElastoDyn(self): expr = Product([ Sum([ Symbol("F0", IP), Symbol("F1", IP) ]), Fraction( Symbol("w4", GEO), Symbol("w3", GEO) ), Fraction( Product([ Symbol("w2", GEO), Symbol("w5", GEO) ]), Symbol("w6", GEO) ) ]) # print "\nDGElastoDyn" # start = time.time() expr_exp = expr.expand() # print "DGElastoDyn: time, expand(): ", time.time() - start # start = time.time() expr_red = expr_exp.reduce_ops() # print "DGElastoDyn: time, reduce_ops(): ", time.time() - start # print "expr.ops(): ", expr.ops() # print "expr_exp.ops(): ", expr_exp.ops() # print "expr_red.ops(): ", expr_red.ops() # print "expr:\n", expr # print "exp:\n", expr_exp # print "red:\n", expr_red F0, F1, w2, w3, w4, w5, w6 = (3.12, -8.1, -45.3, 17.5, 2.2, 5.3, 9.145) self.assertAlmostEqual(eval(str(expr)), eval(str(expr_exp))) self.assertAlmostEqual(eval(str(expr)), eval(str(expr_red))) self.assertEqual(expr.ops(), 6) self.assertEqual(expr_exp.ops(), 11) self.assertEqual(expr_red.ops(), 6) if __name__ == "__main__": # Run all returned tests runner = unittest.TextTestRunner() runner.run(TestDGElastoDyn('testDGElastoDyn')) ffc-1.6.0/test/unit/symbolics/testelasticity2d.py000077500000000000000000000247361255571034100221120ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2010-01-06 # Last changed: 2010-02-01 # Pyhton modules import unittest import time # FFC modules from ffc.quadrature.reduce_operations import operation_count, expand_operations, reduce_operations from ffc.quadrature.symbolics import * from ffc.cpp import format, set_float_formatting from ffc.parameters import FFC_PARAMETERS set_float_formatting(FFC_PARAMETERS['precision']) class TestElasticity2D(unittest.TestCase): def testElasticity2D(self): elasticity = """(((Jinv_00*FE0_C0_D10_ip_j + Jinv_10*FE0_C0_D01_ip_j)*2*(Jinv_00*FE0_C0_D10_ip_k + Jinv_10*FE0_C0_D01_ip_k)*2 + ((Jinv_00*FE0_C1_D10_ip_j + Jinv_10*FE0_C1_D01_ip_j) + (Jinv_01*FE0_C0_D10_ip_j + Jinv_11*FE0_C0_D01_ip_j))*((Jinv_00*FE0_C1_D10_ip_k + Jinv_10*FE0_C1_D01_ip_k) + (Jinv_01*FE0_C0_D10_ip_k + Jinv_11*FE0_C0_D01_ip_k))) + ((Jinv_01*FE0_C1_D10_ip_j + Jinv_11*FE0_C1_D01_ip_j)*2*(Jinv_01*FE0_C1_D10_ip_k + Jinv_11*FE0_C1_D01_ip_k)*2 + ((Jinv_01*FE0_C0_D10_ip_j + Jinv_11*FE0_C0_D01_ip_j) + (Jinv_00*FE0_C1_D10_ip_j + Jinv_10*FE0_C1_D01_ip_j))*((Jinv_01*FE0_C0_D10_ip_k + Jinv_11*FE0_C0_D01_ip_k) + (Jinv_00*FE0_C1_D10_ip_k + Jinv_10*FE0_C1_D01_ip_k))))*0.25*W4_ip*det""" expr = Product([ Sum([ Sum([ Product([ Sum([ Product([Symbol("Jinv_00", GEO), Symbol("FE0_C0_D10_ip_j", BASIS)]) , Product([Symbol("Jinv_10", GEO), Symbol("FE0_C0_D01_ip_j", BASIS)]) ]) , FloatValue(2) , Sum([ Product([Symbol("Jinv_00", GEO), Symbol("FE0_C0_D10_ip_k", BASIS)]) , Product([Symbol("Jinv_10", GEO), Symbol("FE0_C0_D01_ip_k", BASIS)]) ]) , FloatValue(2) ]) , Product([ Sum([ Sum([ Product([Symbol("Jinv_00", GEO), Symbol("FE0_C1_D10_ip_j", BASIS)]) , Product([Symbol("Jinv_10", 
GEO), Symbol("FE0_C1_D01_ip_j", BASIS)]) ]) , Sum([ Product([Symbol("Jinv_01", GEO), Symbol("FE0_C0_D10_ip_j", BASIS)]) , Product([Symbol("Jinv_11", GEO), Symbol("FE0_C0_D01_ip_j", BASIS)]) ]) ]) , Sum([ Sum([ Product([Symbol("Jinv_00", GEO), Symbol("FE0_C1_D10_ip_k", BASIS)]) , Product([Symbol("Jinv_10", GEO), Symbol("FE0_C1_D01_ip_k", BASIS)]) ]) , Sum([ Product([Symbol("Jinv_01", GEO), Symbol("FE0_C0_D10_ip_k", BASIS)]) , Product([Symbol("Jinv_11", GEO), Symbol("FE0_C0_D01_ip_k", BASIS)]) ]) ]) ]) ]) , Sum([ Product([ Sum([ Product([Symbol("Jinv_01", GEO), Symbol("FE0_C1_D10_ip_j", BASIS)]) , Product([Symbol("Jinv_11", GEO), Symbol("FE0_C1_D01_ip_j", BASIS)]) ]) , FloatValue(2) , Sum([ Product([Symbol("Jinv_01", GEO), Symbol("FE0_C1_D10_ip_k", BASIS)]) , Product([Symbol("Jinv_11", GEO), Symbol("FE0_C1_D01_ip_k", BASIS)]) ]) , FloatValue(2) ]) , Product([ Sum([ Sum([ Product([Symbol("Jinv_01", GEO), Symbol("FE0_C0_D10_ip_j", BASIS)]) , Product([Symbol("Jinv_11", GEO), Symbol("FE0_C0_D01_ip_j", BASIS)]) ]) , Sum([ Product([Symbol("Jinv_00", GEO), Symbol("FE0_C1_D10_ip_j", BASIS)]) , Product([Symbol("Jinv_10", GEO), Symbol("FE0_C1_D01_ip_j", BASIS)]) ]) ]) , Sum([ Sum([ Product([Symbol("Jinv_01", GEO), Symbol("FE0_C0_D10_ip_k", BASIS)]) , Product([Symbol("Jinv_11", GEO), Symbol("FE0_C0_D01_ip_k", BASIS)]) ]) , Sum([ Product([Symbol("Jinv_00", GEO), Symbol("FE0_C1_D10_ip_k", BASIS)]) , Product([Symbol("Jinv_10", GEO), Symbol("FE0_C1_D01_ip_k", BASIS)]) ]) ]) ]) ]) ]) , FloatValue(0.25) , Symbol("W4_ip", IP) , Symbol("det", GEO) ]) # print "\nElasticity2D" # start = time.time() expr_exp = expr.expand() # print "Elasticity2D: time, expand(): ", time.time() - start # start = time.time() elasticity_exp = expand_operations(elasticity, format) # print "Elasticity2D: time, old expand(): ", time.time() - start # start = time.time() expr_red = expr_exp.reduce_ops() # print "Elasticity2D: time, reduce_ops(): ", time.time() - start # start = time.time() elasticity_red = 
reduce_operations(elasticity, format) # print "Elasticity2D: time, old reduce(): ", time.time() - start elasticity_exp_ops = operation_count(elasticity_exp, format) elasticity_red_ops = operation_count(elasticity_red, format) # print "expr.ops(): ", expr.ops() # print "Elasticity2D old exp: ops: ", elasticity_exp_ops # print "expr_exp.ops(): ", expr_exp.ops() # print "Elasticity2D old red: ops: ", elasticity_red_ops # print "expr_red.ops(): ", expr_red.ops() # print "expr:\n", expr # print "exp:\n", expr_exp # print "red:\n", expr_red # print "old red:\n", elasticity_red Jinv_00, Jinv_01, Jinv_10, Jinv_11, W4_ip, det = (1.1, 1.5, -4.3, 1.7, 11, 52.3) FE0_C0_D01_ip_j, FE0_C0_D10_ip_j, FE0_C0_D01_ip_k, FE0_C0_D10_ip_k = (1.12, 5.7, -9.3, 7.4) FE0_C1_D01_ip_j, FE0_C1_D10_ip_j, FE0_C1_D01_ip_k, FE0_C1_D10_ip_k = (3.12, -8.1, -45.3, 17.5) self.assertAlmostEqual(eval(str(expr)), eval(str(expr_exp))) self.assertAlmostEqual(eval(str(expr)), eval(str(expr_red))) self.assertAlmostEqual(eval(str(expr)), eval(str(elasticity))) self.assertAlmostEqual(eval(str(expr)), eval(str(elasticity_exp))) self.assertAlmostEqual(eval(str(expr)), eval(str(elasticity_red))) self.assertEqual(expr.ops(), 52) self.assertEqual(elasticity_exp_ops, 159) self.assertEqual(expr_exp.ops(), 159) self.assertEqual(elasticity_red_ops, 71) self.assertEqual(expr_red.ops(), 71) if __name__ == "__main__": # Run all returned tests runner = unittest.TextTestRunner() runner.run(TestElasticity2D('testElasticity2D')) ffc-1.6.0/test/unit/symbolics/testelasticityterm.py000077500000000000000000000063121255571034100225420ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2010-01-06 # Last changed: 2010-03-11 # Pyhton modules import unittest import time # FFC modules from ffc.quadrature.symbolics import * from ffc.cpp import format, set_float_formatting from ffc.parameters import FFC_PARAMETERS set_float_formatting(FFC_PARAMETERS['precision']) class TestElasticityTerm(unittest.TestCase): def testElasticityTerm(self): # expr: 0.25*W1*det*(FE0_C2_D001[0][j]*FE0_C2_D001[0][k]*Jinv_00*Jinv_21 + FE0_C2_D001[0][j]*FE0_C2_D001[0][k]*Jinv_00*Jinv_21) expr = Product([ FloatValue(0.25), Symbol('W1', GEO), Symbol('det', GEO), Sum([Product([Symbol('FE0_C2_D001_0_j', BASIS), Symbol('FE0_C2_D001_0_k', BASIS), Symbol('Jinv_00', GEO), Symbol('Jinv_21', GEO)]), Product([Symbol('FE0_C2_D001_0_j', BASIS), Symbol('FE0_C2_D001_0_k', BASIS), Symbol('Jinv_00', GEO), Symbol('Jinv_21', GEO)]) ]) ]) # print "\nElasticityTerm" # start = time.time() expr_exp = expr.expand() # print "ElasticityTerm: time, expand(): ", time.time() - start # start = time.time() expr_red = expr_exp.reduce_ops() # print "ElasticityTerm: time, reduce_ops(): ", time.time() - start # print "expr.ops(): ", expr.ops() # print "expr_exp.ops(): ", expr_exp.ops() # print "expr_red.ops(): ", expr_red.ops() # print "expr:\n", expr # print "exp:\n", expr_exp # print "red:\n", expr_red det, W1, Jinv_00, Jinv_21, FE0_C2_D001_0_j, FE0_C2_D001_0_k = [0.123 + i for i in range(6)] self.assertAlmostEqual(eval(str(expr)), eval(str(expr_exp))) self.assertAlmostEqual(eval(str(expr)), eval(str(expr_red))) self.assertEqual(expr.ops(), 10) self.assertEqual(expr_exp.ops(), 6) self.assertEqual(expr_red.ops(), 6) # Generate code ip_consts = {} 
geo_consts = {} trans_set = set() start = time.time() opt_code = optimise_code(expr, ip_consts, geo_consts, trans_set) # print "ElasticityTerm, optimise_code(): ", time.time() - start G = [eval(str(list(geo_consts.items())[0][0]))] self.assertAlmostEqual(eval(str(expr)), eval(str(opt_code))) if __name__ == "__main__": # Run all returned tests runner = unittest.TextTestRunner() runner.run(TestElasticityTerm('testElasticityTerm')) ffc-1.6.0/test/unit/symbolics/testelasweighted.py000077500000000000000000000111371255571034100221460ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
# # First added: 2010-01-06 # Last changed: 2010-03-11 # Pyhton modules import unittest import time # FFC modules from ffc.quadrature.symbolics import * from ffc.cpp import format, set_float_formatting from ffc.parameters import FFC_PARAMETERS set_float_formatting(FFC_PARAMETERS['precision']) class TestElasWeighted(unittest.TestCase): def testElasWeighted(self): expr = Product([ Symbol('W4', IP), Sum([ Product([ Symbol('FE0_C1_D01_ip_j', BASIS), Symbol('FE0_C1_D01_ip_k', BASIS), Symbol('Jinv_00', GEO), Symbol('w1', GEO) ]), Product([ Symbol('FE0_C1_D01_ip_j', BASIS), Symbol('FE0_C1_D01_ip_k', BASIS), Symbol('Jinv_01', GEO), Symbol('w0', GEO) ]), Product([ Symbol('w2', GEO), Sum([ Product([ Symbol('FE0_C1_D01_ip_j', BASIS), Symbol('FE0_C1_D01_ip_k', BASIS), Symbol('Jinv_00', GEO), Symbol('w1', GEO) ]), Product([ Symbol('FE0_C1_D01_ip_j', BASIS), Symbol('FE0_C1_D01_ip_k', BASIS), Symbol('Jinv_01', GEO), Symbol('w0', GEO) ]) ]) ]) ]) ]) # print "\nElasticityWeighted" # start = time.time() expr_exp = expr.expand() # print "ElasWeighted: time, expand(): ", time.time() - start # start = time.time() expr_red = expr_exp.reduce_ops() # print "ElasWeighted: time, reduce_ops(): ", time.time() - start # print "expr.ops(): ", expr.ops() # print "expr_exp.ops(): ", expr_exp.ops() # print "expr_red.ops(): ", expr_red.ops() # print "expr:\n", expr # print "exp:\n", expr_exp # print "red:\n", expr_red det, W4, w0, w1, w2, Jinv_00, Jinv_01, Jinv_11, Jinv_10, FE0_C1_D01_ip_j, FE0_C1_D01_ip_k = [0.123 + i for i in range(11)] self.assertAlmostEqual(eval(str(expr)), eval(str(expr_exp))) self.assertAlmostEqual(eval(str(expr)), eval(str(expr_red))) self.assertEqual(expr.ops(), 17) self.assertEqual(expr_exp.ops(), 21) self.assertEqual(expr_red.ops(), 10) # Generate code ip_consts = {} geo_consts = {} trans_set = set() start = time.time() opt_code = optimise_code(expr, ip_consts, geo_consts, trans_set) # print "ElasWeighted, optimise_code(): ", time.time() - start G = 
[eval(str(list(geo_consts.items())[0][0]))] I = [eval(str(list(ip_consts.items())[0][0]))] self.assertAlmostEqual(eval(str(expr)), eval(str(opt_code))) if __name__ == "__main__": # Run all returned tests runner = unittest.TextTestRunner() runner.run(TestElasWeighted('testElasWeighted')) ffc-1.6.0/test/unit/symbolics/testelasweighted2.py000077500000000000000000000132151255571034100222270ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
# # First added: 2010-01-06 # Last changed: 2010-03-11 # Pyhton modules import unittest import time # FFC modules from ffc.quadrature.symbolics import * from ffc.cpp import format, set_float_formatting from ffc.parameters import FFC_PARAMETERS set_float_formatting(FFC_PARAMETERS['precision']) class TestElasWeighted2(unittest.TestCase): def testElasWeighted2(self): expr = Product([ Symbol('W4', IP), Sum([ Product([ Symbol('FE0_C1_D01_ip_j', BASIS), Symbol('FE0_C1_D01_ip_k', BASIS), Symbol('Jinv_00', GEO), Symbol('w1', GEO) ]), Product([ Symbol('FE0_C1_D01_ip_j', BASIS), Symbol('Jinv_01', GEO), Sum([ Product([ Symbol('FE0_C1_D01_ip_k', BASIS), Symbol('w0', GEO) ]), Product([ Symbol('FE0_C1_D01_ip_k', BASIS), Symbol('w1', GEO) ]) ]) ]), Product([ Symbol('w2', GEO), Sum([ Product([ Symbol('FE0_C1_D01_ip_j', BASIS), Symbol('FE0_C1_D01_ip_k', BASIS), Symbol('Jinv_00', GEO), Symbol('w1', GEO) ]), Product([ Symbol('FE0_C1_D01_ip_j', BASIS), Symbol('Jinv_01', GEO), Sum([ Product([ Symbol('FE0_C1_D01_ip_k', BASIS), Symbol('w0', GEO) ]), Product([ Symbol('FE0_C1_D01_ip_k', BASIS), Symbol('w1', GEO) ]) ]) ]) ]) ]) ]) ]) # print "\nElasticityWeighted2" start = time.time() expr_exp = expr.expand() # print "ElasWeighted2: time, expand(): ", time.time() - start start = time.time() expr_red = expr_exp.reduce_ops() # print "ElasWeighted2: time, reduce_ops(): ", time.time() - start # print "expr.ops(): ", expr.ops() # print "expr_exp.ops(): ", expr_exp.ops() # print "expr_red.ops(): ", expr_red.ops() # print "expr:\n", expr # print "exp:\n", expr_exp # print "red:\n", expr_red det, W4, w0, w1, w2, Jinv_00, Jinv_01, Jinv_11, Jinv_10, FE0_C1_D01_ip_j, FE0_C1_D01_ip_k = [0.123 + i for i in range(11)] self.assertAlmostEqual(eval(str(expr)), eval(str(expr_exp))) self.assertAlmostEqual(eval(str(expr)), eval(str(expr_red))) self.assertEqual(expr.ops(), 21) self.assertEqual(expr_exp.ops(), 32) self.assertEqual(expr_red.ops(), 12) # Generate code ip_consts = {} geo_consts = {} trans_set = 
set() start = time.time() opt_code = optimise_code(expr, ip_consts, geo_consts, trans_set) # print "ElasWeighted2, optimise_code(): ", time.time() - start G = [eval(str(list(geo_consts.items())[0][0]))] I = [eval(str(list(ip_consts.items())[0][0]))] self.assertAlmostEqual(eval(str(expr)), eval(str(opt_code))) if __name__ == "__main__": # Run all returned tests runner = unittest.TextTestRunner() runner.run(TestElasWeighted2('testElasWeighted2')) ffc-1.6.0/test/unit/symbolics/testexpandoperations.py000077500000000000000000000230471255571034100230670ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2010-01-06 # Last changed: 2010-02-01 # Pyhton modules import unittest import time # FFC modules from ffc.quadrature.symbolics import * from ffc.cpp import format, set_float_formatting from ffc.parameters import FFC_PARAMETERS set_float_formatting(FFC_PARAMETERS['precision']) class TestExpandOperations(unittest.TestCase): def testExpandOperations(self): f0 = FloatValue(-1) f1 = FloatValue(2) f2 = FloatValue(1) sx = Symbol("x", GEO) sy = Symbol("y", GEO) sz = Symbol("z", GEO) s0 = Product([FloatValue(-1), Symbol("x", GEO)]) s1 = Symbol("y", GEO) s2 = Product([FloatValue(5), Symbol("z", IP)]) s3 = Product([FloatValue(-4), Symbol("z", GEO)]) # Random variable values x = 2.2 y = -0.2 z = 1.1 # Aux. 
expressions P0 = Product([s2, s1]) P1 = Product([P0, s0]) P2 = Product([P1, s1, P0]) P3 = Product([P1, P2]) S0 = Sum([s2, s1]) S1 = Sum([S0, s0]) S2 = Sum([S1, s1, S0]) S3 = Sum([S1, S2]) F0 = Fraction(s2, s1) F1 = Fraction(F0, s0) F2 = Fraction(F1, F0) F3 = Fraction(F1, F2) # Special fractions F4 = Fraction(P0, F0) F5 = Fraction(Fraction(s0, P0), P0) F6 = Fraction( Fraction( Fraction(s1, s0), Fraction(s1, s2)), Fraction( Fraction(s2, s0), Fraction(s1, s0)) ) F7 = Fraction(s1, Product([s1, Symbol("x", GEO)])) F8 = Fraction( Sum([sx, Fraction(sy, sx)]), FloatValue(2)) F4x = F4.expand() F5x = F5.expand() F6x = F6.expand() F7x = F7.expand() F8x = F8.expand() # print "\nF4: '%s'" %F4 # print "F4x: '%s'" %F4x # print "\nF5: '%s'" %F5 # print "F5x: '%s'" %F5x # print "\nF6: '%s'" %F6 # print "F6x: '%s'" %F6x # print "\nF7: '%s'" %F7 # print "F7x: '%s'" %F7x # print "\nF8: '%s'" %F8 # print "F8x: '%s'" %F8x self.assertAlmostEqual(eval(str(F4)), eval(str(F4x))) self.assertAlmostEqual(eval(str(F5)), eval(str(F5x))) self.assertAlmostEqual(eval(str(F6)), eval(str(F6x))) self.assertAlmostEqual(eval(str(F7)), eval(str(F7x))) self.assertAlmostEqual(eval(str(F8)), eval(str(F8x))) self.assertEqual(F4.ops(), 5) self.assertEqual(F4x.ops(), 1) self.assertEqual(F5.ops(), 6) self.assertEqual(F5x.ops(), 5) self.assertEqual(F6.ops(), 9) self.assertEqual(F6x.ops(), 1) self.assertEqual(F7.ops(), 2) self.assertEqual(F7x.ops(), 1) self.assertEqual(F8.ops(), 3) self.assertEqual(F8x.ops(), 4) # Expressions that should be expanded e0 = Product([P3, F2]) e1 = Product([S3, P2]) e2 = Product([F3, S1]) e3 = Sum([P3, F2]) e4 = Sum([S3, P2]) e5 = Sum([F3, S1]) e6 = Fraction(P3, F2) e7 = Fraction(S3, P2) e8 = Fraction(F3, S1) e9 = Fraction(S0, s0) e0x = e0.expand() e1x = e1.expand() e2x = e2.expand() e3x = e3.expand() e4x = e4.expand() e5x = e5.expand() e6x = e6.expand() e7x = e7.expand() e8x = e8.expand() e9x = e9.expand() # print "\ne0: '%s'" %e0 # print "e0x: '%s'" %e0x # print "\ne1: '%s'" %e1 # 
print "e1x: '%s'" %e1x # print "\ne2: '%s'" %e2 # print "e2x: '%s'" %e2x # print "\ne3: '%s'" %e3 # print "e3x: '%s'" %e3x # print "\ne4: '%s'" %e4 # print "e4x: '%s'" %e4x # print "\ne5: '%s'" %e5 # print "e5x: '%s'" %e5x # print "\ne6: '%s'" %e6 # print "e6x: '%s'" %e6x # print "\ne7: '%s'" %e7 # print "e7x: '%s'" %e7x # print "\ne8: '%s'" %e8 # print "e8x: '%s'" %e8x # print "\ne9: '%s'" %e9 # print "e9x: '%s'" %e9x self.assertAlmostEqual(eval(str(e0)), eval(str(e0x))) self.assertAlmostEqual(eval(str(e1)), eval(str(e1x))) self.assertAlmostEqual(eval(str(e2)), eval(str(e2x))) self.assertAlmostEqual(eval(str(e3)), eval(str(e3x))) self.assertAlmostEqual(eval(str(e4)), eval(str(e4x))) self.assertAlmostEqual(eval(str(e5)), eval(str(e5x))) self.assertAlmostEqual(eval(str(e6)), eval(str(e6x))) self.assertAlmostEqual(eval(str(e7)), eval(str(e7x))) self.assertAlmostEqual(eval(str(e8)), eval(str(e8x))) self.assertAlmostEqual(eval(str(e9)), eval(str(e9x))) self.assertEqual(e0.ops(), 16) self.assertEqual(e0x.ops(), 8) self.assertEqual(e1.ops(), 18) self.assertEqual(e1x.ops(), 23) self.assertEqual(e2.ops(), 14) self.assertEqual(e2x.ops(), 9) self.assertEqual(e3.ops(), 16) self.assertEqual(e3x.ops(), 11) self.assertEqual(e4.ops(), 18) self.assertEqual(e4x.ops(), 12) self.assertEqual(e5.ops(), 14) self.assertEqual(e5x.ops(), 6) self.assertEqual(e6.ops(), 16) self.assertEqual(e6x.ops(), 10) self.assertEqual(e7.ops(), 18) self.assertEqual(e7x.ops(), 17) self.assertEqual(e8.ops(), 14) self.assertEqual(e8x.ops(), 8) self.assertEqual(e9.ops(), 3) self.assertEqual(e9x.ops(), 4) # More expressions (from old expand tests) PF = Product([F0, F1]) E0 = Product([s1, f0, S1]) E1 = Sum([P0, E0]) E2 = Fraction(Sum([Product([f1])]), f2) E3 = Sum([F0, F0]) E4 = Product([ Sum([ Product([sx, Sum([sy, Product([ Sum([sy, Product([sy, sz]), sy])]), sy])]), Product([sx, Sum([ Product([sy, sz]), sy])])])]) P4 = Product([s1, Sum([s0, s1])]) P5 = Product([s0, E0]) P6 = Product([s1]) S4 = Sum([s1]) # 
Create 'real' term that caused me trouble P00 = Product([Symbol("Jinv_00", GEO)]*2) P01 = Product([Symbol("Jinv_01", GEO)]*2) P20 = Product([Symbol("Jinv_00", GEO), Product([f1, Symbol("Jinv_20", GEO)]) ]) P21 = Product([Symbol("Jinv_01", GEO), Product([f1, Symbol("Jinv_21", GEO)]) ]) PS0 = Product([Symbol("Jinv_22", GEO), Sum([P00, P01])]) PS1 = Product([ Product([f0, Symbol("Jinv_02", GEO)]), Sum([P20, P21])]) SP = Sum([PS0, PS1]) PFx = PF.expand() E0x = E0.expand() E1x = E1.expand() E2x = E2.expand() E3x = E3.expand() E4x = E4.expand() P4x = P4.expand() P5x = P5.expand() P6x = P6.expand() S4x = S4.expand() SPx = SP.expand() # print "\nPF: '%s'" %PF # print "PFx: '%s'" %PFx # print "\nE0: '%s'" %E0 # print "E0x: '%s'" %E0x # print "\nE1: '%s'" %E1 # print "E1x: '%s'" %E1x # print "\nE2: '%s'" %E2 # print "E2x: '%s'" %E2x # print "\nE3: '%s'" %E3 # print "E3x: '%s'" %E3x # print "\nE4: '%s'" %E4 # print "E4x: '%s'" %E4x # print "\nP4: '%s'" %P4 # print "P4x: '%s'" %P4x # print "\nP5: '%s'" %P5 # print "P5x: '%s'" %P5x # print "\nP6: '%s'" %repr(P6) # print "P6x: '%s'" %repr(P6x) # print "\nS4: '%s'" %repr(S4) # print "S4x: '%s'" %repr(S4x) # print "\nSP: '%s'" %SP # print "SPx: '%s'" %SPx Jinv_00, Jinv_01, Jinv_10, Jinv_02, Jinv_20, Jinv_22, Jinv_21, W1, det = [1,2,3,4,5,6,7,8,9] self.assertAlmostEqual(eval(str(SP)), eval(str(SPx))) self.assertAlmostEqual(eval(str(E0)), eval(str(E0x))) self.assertAlmostEqual(eval(str(E1)), eval(str(E1x))) self.assertAlmostEqual(eval(str(E2)), eval(str(E2x))) self.assertAlmostEqual(eval(str(E3)), eval(str(E3x))) self.assertAlmostEqual(eval(str(E4)), eval(str(E4x))) self.assertAlmostEqual(eval(str(SP)), eval(str(SPx))) self.assertAlmostEqual(eval(str(P4)), eval(str(P4x))) self.assertAlmostEqual(eval(str(P5)), eval(str(P5x))) self.assertEqual(P6x, s1) self.assertEqual(S4x, s1) self.assertEqual(PF.ops(), 6) self.assertEqual(PFx.ops(), 5) self.assertEqual(E0.ops(), 4) self.assertEqual(E0x.ops(), 6) self.assertEqual(E1.ops(), 7) 
self.assertEqual(E1x.ops(), 3) self.assertEqual(E2.ops(), 1) self.assertEqual(E2x.ops(), 0) self.assertEqual(E3.ops(), 5) self.assertEqual(E3x.ops(), 5) self.assertEqual(E4.ops(), 10) self.assertEqual(E4x.ops(), 6) self.assertEqual(SP.ops(), 11) self.assertEqual(SPx.ops(), 13) self.assertEqual(P4.ops(), 2) self.assertEqual(P4x.ops(), 3) self.assertEqual(P5.ops(), 5) self.assertEqual(P5x.ops(), 9) if __name__ == "__main__": # Run all returned tests runner = unittest.TextTestRunner() runner.run(TestExpandOperations('testExpandOperations')) ffc-1.6.0/test/unit/symbolics/testfloat.py000077500000000000000000000052361255571034100206110ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2010-01-06 # Last changed: 2010-02-01 # Pyhton modules import unittest import time # FFC modules from ffc.quadrature.symbolics import * from ffc.cpp import format, set_float_formatting from ffc.parameters import FFC_PARAMETERS set_float_formatting(FFC_PARAMETERS["precision"]) class TestFloat(unittest.TestCase): def testFloat(self): "Test simple FloatValue instance." 
f0 = FloatValue(1.5) f1 = FloatValue(-5) f2 = FloatValue(-1e-14) f3 = FloatValue(-1e-11) f4 = FloatValue(1.5) # print "\nTesting FloatValue" # print "f0: '%s'" %f0 # print "f1: '%s'" %f1 # print "f2: '%s'" %f2 # print "f3: '%s'" %f3 self.assertEqual(repr(f0), "FloatValue(%s)" % format["float"](1.5)) self.assertEqual(repr(f1), "FloatValue(%s)" % format["float"](-5)) self.assertEqual(repr(f2), "FloatValue(%s)" % format["float"](0)) self.assertEqual(repr(f3), "FloatValue(%s)" % format["float"](-1e-11)) self.assertEqual(f2.val == 0, True) self.assertEqual(f3.val == 0, False) self.assertEqual(f0.ops(), 0) self.assertEqual(f1.ops(), 0) self.assertEqual(f2.ops(), 0) self.assertEqual(f3.ops(), 0) self.assertEqual(f0 == f4, True) self.assertEqual(f1 != f3, True) self.assertEqual(f0 < f1, False) self.assertEqual(f2 > f3, True) # Test hash l = [f0] d = {f0:0} self.assertEqual(f0 in l, True) self.assertEqual(f0 in d, True) self.assertEqual(f4 in l, True) self.assertEqual(f4 in d, True) self.assertEqual(f1 in l, False) self.assertEqual(f1 in d, False) if __name__ == "__main__": # Run all returned tests runner = unittest.TextTestRunner() runner.run(TestFloat('testFloat')) ffc-1.6.0/test/unit/symbolics/testfloatoperators.py000077500000000000000000000066511255571034100225520ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
# # First added: 2010-01-06 # Last changed: 2010-02-01 # Pyhton modules import unittest import time # FFC modules from ffc.quadrature.reduce_operations import expand_operations, reduce_operations from ffc.quadrature.symbolics import * from ffc.quadrature.sumobj import _group_fractions from ffc.cpp import format, set_float_formatting from ffc.parameters import FFC_PARAMETERS set_float_formatting(FFC_PARAMETERS['precision']) from ffc.log import error, push_level, pop_level, CRITICAL class TestFloatOperators(unittest.TestCase): def testFloatOperators(self): "Test binary operators" f0 = FloatValue(0.0) f2 = FloatValue(2.0) f3= FloatValue(3.0) fm1= FloatValue(-1.0) fm3= FloatValue(-3.0) x = Symbol("x", GEO) y = Symbol("y", GEO) z = Symbol("z", GEO) p0 = Product([f2, x]) p1 = Product([x, y]) p2 = Product([f2, z]) p3 = Product([y, x, z]) p4 = Product([fm1, f2, x]) S0 = Sum([p0, fm3]) S1 = Sum([x, y]) S2 = Sum([S1, fm3]) S3 = Sum([p4, fm3]) S4 = Sum([fm3, Product([fm1, Sum([x, y])])]) F0 = Fraction(f2, y) F1 = Fraction(FloatValue(-1.5), x) F2 = Fraction(fm3, S1) SF0 = Sum([f3, F1]) SF1 = Sum([f3, Product([fm1, F1])]) # Test FloatValue '+' self.assertEqual(str(f2 + fm3), str(fm1)) self.assertEqual(str(f2 + fm3 + fm3 + f2 + f2), str(f0)) self.assertEqual(str(f0 + p0), str(p0)) self.assertEqual(str(fm3 + p0), str(S0)) self.assertEqual(str(fm3 + S1), str(S2)) self.assertEqual(str(f3 + F1), str(SF0)) # Test FloatValue '-' self.assertEqual(str(f2 - fm3), str(FloatValue(5))) self.assertEqual(str(f0 - p0), str(p4)) self.assertEqual(str(fm3 - p0), str(S3)) self.assertEqual(str(fm3 - S1), str(S4)) self.assertEqual(str(f3 - F1), str(SF1)) # Test FloatValue '*', only need one because all other cases are # handled by 'other' self.assertEqual(str(f2*f2), '%s' % format["float"](4)) # Test FloatValue '/' self.assertEqual(str(fm3/f2), str(FloatValue(-1.5))) self.assertEqual(str(f2/y), str(F0)) self.assertEqual(str(fm3/p0), str(F1)) self.assertEqual(str(fm3/S1), str(F2)) # Silence output 
push_level(CRITICAL) self.assertRaises(Exception, f2.__truediv__, F0) self.assertRaises(Exception, f2.__truediv__, f0) self.assertRaises(Exception, f2.__truediv__, Product([f0, y])) pop_level() if __name__ == "__main__": # Run all returned tests runner = unittest.TextTestRunner() runner.run(TestFloatOperators('testFloatOperators')) ffc-1.6.0/test/unit/symbolics/testfraction.py000077500000000000000000000066741255571034100213200ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2010-01-06 # Last changed: 2010-02-01 # Pyhton modules import unittest import time # FFC modules from ffc.quadrature.symbolics import * from ffc.cpp import format, set_float_formatting from ffc.parameters import FFC_PARAMETERS set_float_formatting(FFC_PARAMETERS['precision']) from ffc.log import push_level, pop_level, CRITICAL class TestFraction(unittest.TestCase): def testFraction(self): "Test simple fraction instance." 
f0 = FloatValue(-2.0) f1 = FloatValue(3.0) f2 = FloatValue(0) s0 = Symbol("x", BASIS) s1 = Symbol("y", GEO) F0 = Fraction(f1, f0) F1 = Fraction(f2, f0) F2 = Fraction(s0, s1) F3 = Fraction(s0, f1) F4 = Fraction(f0, s1) F5 = Fraction(f2, s1) F6 = Fraction(s0, s1) # print "\nTesting Fractions" # print "F0 = frac(%s, %s) = '%s'" %(f1, f0, F0) # print "F1 = frac(%s, %s) = '%s'" %(f2, f0, F1) # print "F2 = frac(%s, %s) = '%s'" %(s0, s1, F2) # print "F3 = frac(%s, %s) = '%s'" %(s0, f1, F3) # print "F4 = frac(%s, %s) = '%s'" %(f0, s1, F4) # print "F5 = frac(%s, %s) = '%s'" %(f2, s1, F5) # print "F6 = frac(%s, %s) = '%s'" %(s0, s1, F6) # Silence output push_level(CRITICAL) self.assertRaises(Exception, Fraction, f0, f2) self.assertRaises(Exception, Fraction, s0, f2) pop_level() self.assertEqual(repr(F0), "Fraction(FloatValue(%s), FloatValue(%s))"\ % (format["float"](-1.5), format["float"](1))) self.assertEqual(repr(F2), "Fraction(Symbol('x', BASIS), Symbol('y', GEO))") self.assertEqual(str(F0), "%s" % format["float"](-1.5)) self.assertEqual(str(F1), "%s" % format["float"](0)) self.assertEqual(str(F2), "x/y") self.assertEqual(str(F3), "x/%s" % format["float"](3)) self.assertEqual(str(F4), "-%s/y" % format["float"](2)) self.assertEqual(str(F5), "%s" % format["float"](0)) self.assertEqual(F2 == F2, True) self.assertEqual(F2 == F3, False) self.assertEqual(F5 != F4, True) self.assertEqual(F2 == F6, True) self.assertEqual(F0.ops(), 0) self.assertEqual(F1.ops(), 0) self.assertEqual(F2.ops(), 1) self.assertEqual(F3.ops(), 1) self.assertEqual(F4.ops(), 1) self.assertEqual(F5.ops(), 0) # Test hash l = [F2] d = {F2:0} self.assertEqual(F2 in l, True) self.assertEqual(F2 in d, True) self.assertEqual(F6 in l, True) self.assertEqual(F6 in d, True) if __name__ == "__main__": # Run all returned tests runner = unittest.TextTestRunner() runner.run(TestFraction('testFraction')) 
ffc-1.6.0/test/unit/symbolics/testfractionoperators.py000077500000000000000000000074671255571034100232600ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2010-01-06 # Last changed: 2010-02-01 # Pyhton modules import unittest import time # FFC modules from ffc.quadrature.reduce_operations import operation_count, expand_operations, reduce_operations from ffc.quadrature.symbolics import * from ffc.quadrature.sumobj import _group_fractions from ffc.cpp import format, set_float_formatting from ffc.parameters import FFC_PARAMETERS set_float_formatting(FFC_PARAMETERS['precision']) from ffc.log import error, push_level, pop_level, CRITICAL class TestFractionOperators(unittest.TestCase): def testFractionOperators(self): "Test binary operators" f_0 = format["float"](0) f_1 = format["float"](1) f_2 = format["float"](2) f_5 = format["float"](5) f2 = FloatValue(2.0) fm3 = FloatValue(-3.0) x = Symbol("x", GEO) y = Symbol("y", GEO) p0 = Product([f2, x]) p1 = Product([x, y]) S0 = Sum([x, y]) F0 = Fraction(f2, y) F1 = Fraction(x, y) F2 = Fraction(x, S0) F3 = Fraction(x, y) F4 = Fraction(p0, y) F5 = Fraction(Product([fm3, x]), y) # Test Fraction '+' self.assertEqual(str(F0 + f2), '(%s + %s/y)' % (f_2, f_2)) self.assertEqual(str(F1 + x), '(x + x/y)') self.assertEqual(str(F1 + p0), '(%s*x + x/y)' % f_2) 
self.assertEqual(str(F1 + S0), '(x + y + x/y)') self.assertEqual(str(F1 + F3), '%s*x/y' % f_2) self.assertEqual(str(F0 + F1), '(%s + x)/y' % f_2) self.assertEqual(str(F2 + F4), '(%s*x/y + x/(x + y))' % f_2) # Test Fraction '-' self.assertEqual(str(F0 - f2), '(%s/y-%s)' % (f_2, f_2)) self.assertEqual(str(F1 - x), '(x/y - x)') self.assertEqual(str(F1 - p0), '(x/y-%s*x)' % f_2) self.assertEqual(str(F1 - S0), '(x/y - (x + y))') self.assertEqual(str(F1 - F3), '%s' % f_0) self.assertEqual(str(F4 - F1), 'x/y') self.assertEqual(str(F4 - F5), '%s*x/y' % f_5) self.assertEqual(str(F0 - F1), '(%s - x)/y' % f_2) self.assertEqual(str(F2 - F4), '(x/(x + y) - %s*x/y)' % f_2) # Test Fraction '*' self.assertEqual(str(F1 * f2), '%s*x/y' % f_2) self.assertEqual(str(F1 * x), 'x*x/y') self.assertEqual(str(F1 * p1), 'x*x') self.assertEqual(str(F1 * S0), '(x + x*x/y)') self.assertEqual(repr(F1 * S0), repr(Sum([x, Fraction( Product([x, x]), y)]) )) self.assertEqual(str(F1 * F0), '%s*x/(y*y)' % f_2) # Test Fraction '/' self.assertEqual(str(F0 / f2), '%s/y' % f_1) self.assertEqual(str(F1 / x), '%s/y' % f_1) self.assertEqual(str(F4 / p1), '%s/(y*y)' % f_2) self.assertEqual(str(F4 / x), '%s/y' % f_2) self.assertEqual(str(F2 / y), 'x/(x*y + y*y)') self.assertEqual(str(F0 / S0), '%s/(x*y + y*y)' % f_2) # Silence output push_level(CRITICAL) self.assertRaises(Exception, F0.__truediv__, F0) pop_level() if __name__ == "__main__": # Run all returned tests runner = unittest.TextTestRunner() runner.run(TestFractionOperators('testFractionOperators')) ffc-1.6.0/test/unit/symbolics/testmixedsymbols.py000077500000000000000000000276041255571034100222260ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. 
# # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2010-01-06 # Last changed: 2010-03-11 # Pyhton modules import unittest import time # FFC modules from ffc.quadrature.symbolics import * from ffc.cpp import format, set_float_formatting from ffc.parameters import FFC_PARAMETERS set_float_formatting(FFC_PARAMETERS['precision']) class TestMixedSymbols(unittest.TestCase): def testMixedSymbols(self): f_0 = format["float"](0) f_2 = format["float"](2) f_3 = format["float"](3) f_4 = format["float"](4) f_6 = format["float"](6) f0 = FloatValue(-2.0) f1 = FloatValue(3.0) f2 = FloatValue(0) s0 = Symbol("x", BASIS) s1 = Symbol("y", GEO) s2 = Symbol("z", GEO) p0 = Product([s0, s1]) p1 = Product([f1, s0, s1]) p2 = Product([s0, f2, s2]) p3 = Product([s0, f0, s1, f1, s2]) S0 = Sum([s0, s1]) S1 = Sum([s0, s0]) S2 = Sum([f0, s0]) S3 = Sum([s0, f0, s0]) F0 = Fraction(f1, f0) F1 = Fraction(s0, s1) F2 = Fraction(s0, f1) F3 = Fraction(f0, s1) x = 1.2; y = 2.36; z = 6.75; # Mixed products mpp0 = Product([p0, s0]) mpp1 = Product([p1, p0]) mpp2 = Product([p2, p3]) mpp3 = Product([p1, mpp1]) mps0 = Product([S0, s0]) mps1 = Product([S1, S0]) mps2 = Product([S2, S3]) mps3 = Product([S1, mps1]) mpf0 = Product([F1, s0]) mpf1 = Product([F1, F2]) mpf2 = Product([F2, F3]) mpf3 = Product([F1, mpf1]) # print "\nMixed Products" # print "\nmpp0: %s * %s = '%s'" % (p0, s0, mpp0) # print "mpp1: %s * %s = '%s'" % (p1, p0, mpp1) # print "mpp2: %s * %s = 
'%s'" % (p2, p3, mpp2) # print "mpp3: %s * %s = '%s'" % (p1, mpp1, mpp3) # print "\nmps0: %s * %s = '%s'" % (S0, s0, mps0) # print "mps1: %s * %s = '%s'" % (S1, S0, mps1) # print "mps2: %s * %s = '%s'" % (S2, S3, mps2) # print "mps3: %s * %s = '%s'" % (S1, mps1, mps3) # print "\nmpf0: %s * %s = '%s'" % (F1, s0, mpf0) # print "mpf1: %s * %s = '%s'" % (F1, F2, mpf1) # print "mpf2: %s * %s = '%s'" % (F2, F3, mpf2) # print "mpf3: %s * %s = '%s'" % (F1, mpf1, mpf3) self.assertAlmostEqual(eval(str(mpp0)), eval(str(p0))*eval(str(s0))) self.assertAlmostEqual(eval(str(mpp1)), eval(str(p1))*eval(str(p0))) self.assertAlmostEqual(eval(str(mpp2)), eval(str(p2))*eval(str(p3))) self.assertAlmostEqual(eval(str(mpp3)), eval(str(p1))*eval(str(mpp1))) self.assertAlmostEqual(eval(str(mps0)), eval(str(S0))*eval(str(s0))) self.assertAlmostEqual(eval(str(mps1)), eval(str(S1))*eval(str(S0))) self.assertAlmostEqual(eval(str(mps2)), eval(str(S2))*eval(str(S3))) self.assertAlmostEqual(eval(str(mps3)), eval(str(S1))*eval(str(mps1))) self.assertAlmostEqual(eval(str(mpf0)), eval(str(F1))*eval(str(s0))) self.assertAlmostEqual(eval(str(mpf1)), eval(str(F1))*eval(str(F2))) self.assertAlmostEqual(eval(str(mpf2)), eval(str(F2))*eval(str(F3))) self.assertAlmostEqual(eval(str(mpf3)), eval(str(F1))*eval(str(mpf1))) self.assertEqual(mpp0.ops(), 2) self.assertEqual(mpp1.ops(), 4) self.assertEqual(mpp2.ops(), 0) self.assertEqual(mpp3.ops(), 6) self.assertEqual(mps0.ops(), 2) self.assertEqual(mps1.ops(), 3) self.assertEqual(mps2.ops(), 4) self.assertEqual(mps3.ops(), 5) self.assertEqual(mpf0.ops(), 2) self.assertEqual(mpf1.ops(), 3) self.assertEqual(mpf2.ops(), 3) self.assertEqual(mpf3.ops(), 5) self.assertEqual(str(mpp0), 'x*x*y') self.assertEqual(str(mpp1), '%s*x*x*y*y' % f_3) self.assertEqual(str(mpp2), '%s' % f_0) self.assertEqual(str(mpp3), '%s*x*x*x*y*y*y' % format["float"](9)) self.assertEqual(str(mps0), 'x*(x + y)') self.assertEqual(str(mps1), '(x + x)*(x + y)') # self.assertEqual(str(mps2), 
'(x-2)*(x + x-2)') self.assertEqual(str(mps2), '(x + x-%s)*(x-%s)' % (f_2, f_2)) self.assertEqual(str(mps3), '(x + x)*(x + x)*(x + y)') self.assertEqual(str(mpf0), 'x*x/y') self.assertEqual(str(mpf1), 'x/%s*x/y' % f_3) self.assertEqual(str(mpf2), '-%s/y*x/%s' % (f_2, f_3)) self.assertEqual(str(mpf3), 'x/%s*x/y*x/y' % f_3) # Mixed sums msp0 = Sum([p0, s0]) msp1 = Sum([p1, p0]) msp2 = Sum([p2, p3]) msp3 = Sum([p1, msp1]) msp4 = Sum([f2, f2]) mss0 = Sum([S0, s0]) mss1 = Sum([S1, S0]) mss2 = Sum([S2, S3]) mss3 = Sum([S1, mps1]) msf0 = Sum([F1, s0]) msf1 = Sum([F1, F2]) msf2 = Sum([F2, F3]) msf3 = Sum([F1, msf1]) # print "\nTesting Mixed Sums" # print "\nmsp0: %s + %s = '%s'" % (p0, s0, msp0) # print "msp1: %s + %s = '%s'" % (p1, p0, msp1) # print "msp2: %s + %s = '%s'" % (p2, p3, msp2) # print "msp3: %s + %s = '%s'" % (p1, msp1, msp3) # print "msp4: %s + %s = '%s'" % (f2, f2, msp4) # print "\nmss0: %s + %s = '%s'" % (S0, s0, mss0) # print "mss1: %s + %s = '%s'" % (S1, S0, mss1) # print "mss2: %s + %s = '%s'" % (S2, S3, mss2) # print "mss3: %s + %s = '%s'" % (S1, mss1, mss3) # print "\nmsf0: %s + %s = '%s'" % (F1, s0, msf0) # print "msf1: %s + %s = '%s'" % (F1, F2, msf1) # print "msf2: %s + %s = '%s'" % (F2, F3, msf2) # print "msf3: %s + %s = '%s'" % (F1, msf1, msf3) # print "msf3: %s + %s = '%s'" % (F1, msf1, msf3) self.assertAlmostEqual(eval(str(msp0)), eval(str(p0))+eval(str(s0))) self.assertAlmostEqual(eval(str(msp1)), eval(str(p1))+eval(str(p0))) self.assertAlmostEqual(eval(str(msp2)), eval(str(p2))+eval(str(p3))) self.assertAlmostEqual(eval(str(msp3)), eval(str(p1))+eval(str(msp1))) self.assertEqual(str(msp4), '%s' % f_0) self.assertAlmostEqual(eval(str(mss0)), eval(str(S0))+eval(str(s0))) self.assertAlmostEqual(eval(str(mss1)), eval(str(S1))+eval(str(S0))) self.assertAlmostEqual(eval(str(mss2)), eval(str(S2))+eval(str(S3))) self.assertAlmostEqual(eval(str(mss3)), eval(str(S1))+eval(str(mps1))) self.assertAlmostEqual(eval(str(msf0)), eval(str(F1))+eval(str(s0))) 
self.assertAlmostEqual(eval(str(msf1)), eval(str(F1))+eval(str(F2))) self.assertAlmostEqual(eval(str(msf2)), eval(str(F2))+eval(str(F3))) self.assertAlmostEqual(eval(str(msf3)), eval(str(F1))+eval(str(msf1))) self.assertEqual(msp0.ops(), 2) self.assertEqual(msp1.ops(), 4) self.assertEqual(msp2.ops(), 3) self.assertEqual(msp3.ops(), 7) self.assertEqual(mss0.ops(), 2) self.assertEqual(mss1.ops(), 3) self.assertEqual(mss2.ops(), 3) self.assertEqual(mss3.ops(), 5) self.assertEqual(msf0.ops(), 2) self.assertEqual(msf1.ops(), 3) self.assertEqual(msf2.ops(), 3) self.assertEqual(msf3.ops(), 5) self.assertEqual(str(msp0), '(x + x*y)') self.assertEqual(str(msp1), '(%s*x*y + x*y)' % f_3) self.assertEqual(str(msp2), '-%s*x*y*z' % f_6) self.assertEqual(str(msp3), '(%s*x*y + %s*x*y + x*y)' % (f_3, f_3)) self.assertEqual(str(mss0), '(x + x + y)') self.assertEqual(str(mss1), '(x + x + x + y)') self.assertEqual(str(mss2), '(x + x + x-%s)' % f_4) self.assertEqual(str(mss3), '(x + x + (x + x)*(x + y))') self.assertEqual(str(msf0), '(x + x/y)') self.assertEqual(str(msf1), '(x/%s + x/y)' % f_3) self.assertEqual(str(msf2), '(x/%s-%s/y)' % (f_3, f_2)) self.assertEqual(str(msf3), '(x/%s + x/y + x/y)' % f_3) # Mixed fractions mfp0 = Fraction(p0, s0) mfp1 = Fraction(p1, p0) mfp2 = Fraction(p2, p3) mfp3 = Fraction(p1, mfp1) mfs0 = Fraction(S0, s0) mfs1 = Fraction(S1, S0) mfs2 = Fraction(S2, S3) mfs3 = Fraction(S1, mfs1) mff0 = Fraction(F1, s0) mff1 = Fraction(F1, F2) mff2 = Fraction(F2, F3) mff3 = Fraction(F1, mff1) # print "\nTesting Mixed Fractions" # print "\nmfp0: %s / %s = '%s'" % (p0, s0, mfp0) # print "mfp1: %s / %s = '%s'" % (p1, p0, mfp1) # print "mfp2: %s / %s = '%s'" % (p2, p3, mfp2) # print "mfp3: %s / %s = '%s'" % (p1, mfp1, mfp3) # print "\nmfs0: %s / %s = '%s'" % (S0, s0, mfs0) # print "mfs1: %s / %s = '%s'" % (S1, S0, mfs1) # print "mfs2: %s / %s = '%s'" % (S2, S3, mfs2) # print "mfs3: %s / %s = '%s'" % (S1, mfs1, mfs3) # print "\nmff0: %s / %s = '%s'" % (F1, s0, mff0) # 
print "mff1: %s / %s = '%s'" % (F1, F2, mff1) # print "mff2: %s / %s = '%s'" % (F2, F3, mff2) # print "mff3: %s / %s = '%s'" % (F1, mff1, mff3) self.assertAlmostEqual(eval(str(mfp0)), eval(str(p0))/eval(str(s0))) self.assertAlmostEqual(eval(str(mfp1)), eval(str(p1))/eval(str(p0))) self.assertAlmostEqual(eval(str(mfp2)), eval(str(p2))/eval(str(p3))) self.assertAlmostEqual(eval(str(mfp3)), eval(str(p1))/eval(str(mfp1))) self.assertAlmostEqual(eval(str(mfs0)), eval(str(S0))/eval(str(s0))) self.assertAlmostEqual(eval(str(mfs1)), eval(str(S1))/eval(str(S0))) self.assertAlmostEqual(eval(str(mfs2)), eval(str(S2))/eval(str(S3))) self.assertAlmostEqual(eval(str(mfs3)), eval(str(S1))/eval(str(mfs1))) self.assertAlmostEqual(eval(str(mff0)), eval(str(F1))/eval(str(s0))) self.assertAlmostEqual(eval(str(mff1)), eval(str(F1))/eval(str(F2))) self.assertAlmostEqual(eval(str(mff2)), eval(str(F2))/eval(str(F3))) self.assertAlmostEqual(eval(str(mff3)), eval(str(F1))/eval(str(mff1))) self.assertEqual(mfp0.ops(), 2) self.assertEqual(mfp1.ops(), 4) self.assertEqual(mfp2.ops(), 0) self.assertEqual(mfp3.ops(), 7) self.assertEqual(mfs0.ops(), 2) self.assertEqual(mfs1.ops(), 3) self.assertEqual(mfs2.ops(), 4) self.assertEqual(mfs3.ops(), 5) self.assertEqual(mff0.ops(), 2) self.assertEqual(mff1.ops(), 3) self.assertEqual(mff2.ops(), 3) self.assertEqual(mff3.ops(), 5) self.assertEqual(str(mfp0), 'x*y/x') self.assertEqual(str(mfp1), '%s*x*y/(x*y)' % f_3) self.assertEqual(str(mfp2), '%s' % f_0) self.assertEqual(str(mfp3), '%s*x*y/(%s*x*y/(x*y))' % (f_3, f_3)) self.assertEqual(str(mfs0), '(x + y)/x') self.assertEqual(str(mfs1), '(x + x)/(x + y)') self.assertEqual(str(mfs2), '(x-%s)/(x + x-%s)' % (f_2, f_2)) self.assertEqual(str(mfs3), '(x + x)/((x + x)/(x + y))') self.assertEqual(str(mff0), '(x/y)/x') self.assertEqual(str(mff1), '(x/y)/(x/%s)' % f_3) self.assertEqual(str(mff2), '(x/%s)/(-%s/y)' % (f_3, f_2)) self.assertEqual(str(mff3), '(x/y)/((x/y)/(x/%s))' % f_3) # Use p1 as a base expression 
for Symbol s3 = Symbol(format["cos"](str(p1)), CONST, p1, 1) self.assertEqual(str(s3), 'std::cos(%s*x*y)' % f_3) self.assertEqual(s3.ops(), 3) if __name__ == "__main__": # Run all returned tests runner = unittest.TextTestRunner() runner.run(TestMixedSymbols('testMixedSymbols')) ffc-1.6.0/test/unit/symbolics/testnotfinished.py000077500000000000000000000073201255571034100220120ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2010-01-06 # Last changed: 2010-02-01 # Pyhton modules import unittest import time # FFC modules from ffc.quadrature.symbolics import * from ffc.quadrature.sumobj import _group_fractions from ffc.cpp import format, set_float_formatting from ffc.parameters import FFC_PARAMETERS set_float_formatting(FFC_PARAMETERS['precision']) class TestNotFinished(unittest.TestCase): def testNotFinished(self): "Stuff that would be nice to implement." f_1 = format["float"](1) f_2 = format["float"](2) f_4 = format["float"](4) f_8 = format["float"](8) f0 = FloatValue(4) f1 = FloatValue(2) f2 = FloatValue(8) s0 = Symbol("x", GEO) s1 = Symbol("y", GEO) s2 = Symbol("z", GEO) a = Symbol("a", GEO) b = Symbol("b", GEO) c = Symbol("c", GEO) # Aux. 
expressions p0 = Product([f1, s0]) p1 = Product([f2, s1]) p2 = Product([s0, s1]) F0 = Fraction(f0, s0) S0 = Sum([p0, p1]) S1 = Sum([s0, p2]) S2 = Sum([FloatValue(1), s1]) S3 = Sum([F0, F0]) # Thing to be implemented e0 = f0 / S0 e1 = s0 / S1 e2 = S2 / S1 e3 = _group_fractions(S3) e4 = Sum([Fraction(f1*s0, a*b*c), Fraction(s0, a*b)]).expand().reduce_ops() # Tests that pass the current implementation self.assertEqual(str(e0), '%s/(%s*x + %s*y)' % (f_4, f_2, f_8)) self.assertEqual(str(e1), 'x/(x + x*y)') self.assertEqual(str(e2), '(%s + y)/(x + x*y)' % f_1) self.assertEqual(str(e3), '%s/x' % f_8) self.assertEqual(str(e4), 'x*(%s/(a*b) + %s/(a*b*c))' % (f_1, f_2)) # Tests that should pass in future implementations (change NotEqual to Equal) self.assertNotEqual(str(e0), '%s/(x + %s*y)' % (f_2, f_4)) self.assertNotEqual(str(e1), '%s/(%s + y)' % (f_1, f_1)) self.assertNotEqual(str(e2), '%s/x' % f_1) self.assertNotEqual(str(e4), 'x*(%s/c + %s)/(a*b)' % (f_2, f_1)) # TODO: Would it be a good idea to reduce expressions wrt. var_type # without first expanding? 
E0 = Product([ Sum([ Product([ Symbol('B0', BASIS), Product([Symbol('B1', BASIS), Sum([s0]), Sum([s0])]) ]), Product([Symbol('B0', BASIS), Symbol('B1', BASIS)]) ]) ]) Er0 = E0.reduce_vartype(BASIS) Ex0 = E0.expand().reduce_vartype(BASIS) # print "%s, red(BASIS): ('%s', '%s')" %(E0, Er0[0][0], Er0[0][1]) # print "%s, red(BASIS): ('%s', '%s')" %(E0, Ex0[0][0], Ex0[0][1]) self.assertNotEqual( Ex0[0][1], Er0[0][1].expand() ) # Both of these reductions should work at the same time # 1) 2/(x/(a+b) + y/(a+b)) --> 2(a+b)/(x+y) # 2) 2/(x + y/(a+b)) --> no reduction, or if divisions are more expensive # 3) 2/(x + y/(a+b)) --> 2(a+b)/((a+b)x + y) if __name__ == "__main__": # Run all returned tests runner = unittest.TextTestRunner() runner.run(TestNotFinished('testNotFinished')) ffc-1.6.0/test/unit/symbolics/testpoisson.py000077500000000000000000000120471255571034100211740ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
# # First added: 2010-01-06 # Last changed: 2010-02-01 # Pyhton modules import unittest import time # FFC modules from ffc.quadrature.reduce_operations import operation_count, expand_operations, reduce_operations from ffc.quadrature.symbolics import * from ffc.cpp import format, set_float_formatting from ffc.parameters import FFC_PARAMETERS set_float_formatting(FFC_PARAMETERS['precision']) class TestPoisson(unittest.TestCase): def testPoisson(self): poisson = """((Jinv_00*FE0_D10_ip_j + Jinv_10*FE0_D01_ip_j)*(Jinv_00*FE0_D10_ip_k + Jinv_10*FE0_D01_ip_k) + (Jinv_01*FE0_D10_ip_j + Jinv_11*FE0_D01_ip_j)*(Jinv_01*FE0_D10_ip_k + Jinv_11*FE0_D01_ip_k))*W4_ip*det""" expr = Product([ Sum([ Product([ Sum([ Product([Symbol("Jinv_00", GEO), Symbol("FE0_D10_ip_j", BASIS)]) , Product([Symbol("Jinv_10", GEO), Symbol("FE0_D01_ip_j", BASIS)]) ]), Sum([ Product([Symbol("Jinv_00", GEO), Symbol("FE0_D10_ip_k", BASIS)]) , Product([Symbol("Jinv_10", GEO), Symbol("FE0_D01_ip_k", BASIS)]) ]) ]) , Product([ Sum([ Product([Symbol("Jinv_01", GEO), Symbol("FE0_D10_ip_j", BASIS)]) , Product([Symbol("Jinv_11", GEO), Symbol("FE0_D01_ip_j", BASIS)]) ]), Sum([ Product([Symbol("Jinv_01", GEO), Symbol("FE0_D10_ip_k", BASIS)]) , Product([Symbol("Jinv_11", GEO), Symbol("FE0_D01_ip_k", BASIS)]) ]) ]) ]) , Symbol("W4_ip", IP) , Symbol("det", GEO) ]) # print "\nPoisson" # start = time.time() expr_exp = expr.expand() # print "Poisson: time, expand(): ", time.time() - start # start = time.time() poisson_exp = expand_operations(poisson, format) # print "Poisson: time, old expand(): ", time.time() - start # start = time.time() expr_red = expr_exp.reduce_ops() # print "Poisson: time, reduce_ops(): ", time.time() - start # start = time.time() poisson_red = reduce_operations(poisson, format) # print "Poisson: time, old reduce(): ", time.time() - start poisson_exp_ops = operation_count(poisson_exp, format) poisson_red_ops = operation_count(poisson_red, format) # print "expr.ops(): ", expr.ops() # print "Poisson 
old exp: ops: ", poisson_exp_ops # print "expr_exp.ops(): ", expr_exp.ops() # print "Poisson old red: ops: ", poisson_red_ops # print "expr_red.ops(): ", expr_red.ops() # print "expr: ", expr # print "exp: ", expr_exp # print "red: ", expr_red Jinv_00, Jinv_01, Jinv_10, Jinv_11, W4_ip, det = (1.1, 1.5, -4.3, 1.7, 11, 52.3) FE0_D01_ip_j, FE0_D10_ip_j, FE0_D01_ip_k, FE0_D10_ip_k = (1.12, 5.7, -9.3, 7.4) self.assertAlmostEqual(eval(str(expr)), eval(str(expr_exp))) self.assertAlmostEqual(eval(str(expr)), eval(str(expr_red))) self.assertAlmostEqual(eval(str(expr)), eval(str(poisson))) self.assertAlmostEqual(eval(str(expr)), eval(str(poisson_exp))) self.assertAlmostEqual(eval(str(expr)), eval(str(poisson_red))) self.assertEqual(expr.ops(), 17) self.assertEqual(poisson_exp_ops, 47) self.assertEqual(expr_exp.ops(), 47) self.assertEqual(poisson_red_ops, 23) self.assertEqual(expr_red.ops(), 23) if __name__ == "__main__": # Run all returned tests runner = unittest.TextTestRunner() runner.run(TestPoisson('testPoisson')) ffc-1.6.0/test/unit/symbolics/testproduct.py000077500000000000000000000124451255571034100211640ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
# # First added: 2010-01-06 # Last changed: 2010-02-01 # Pyhton modules import unittest import time # FFC modules from ffc.quadrature.symbolics import * from ffc.cpp import format, set_float_formatting from ffc.parameters import FFC_PARAMETERS set_float_formatting(FFC_PARAMETERS['precision']) class TestProduct(unittest.TestCase): def testProduct(self): "Test simple product instance." f_0 = format["float"](0) f_1 = format["float"](1) f0 = FloatValue(-2.0) f1 = FloatValue(3.0) f2 = FloatValue(0) f3 = FloatValue(-1) f4 = FloatValue(1) f5 = FloatValue(-0.5) f6 = FloatValue(2.0) s0 = Symbol("x", BASIS) s1 = Symbol("y", GEO) s2 = Symbol("z", GEO) p0 = Product([]) p1 = Product([s0]) p2 = Product([s0, s1]) p3 = Product([f1, s0, s1]) p4 = Product([s0, f2, s2]) p5 = Product([s0, f0, s1, f1, s2]) p6 = Product([s0, f3, s1]) p7 = Product([s0, f4, s1]).expand().reduce_ops() p8 = Product([s0, f0, s2, f5]) p9 = Product([s0, s1]) p10 = Product([p0, p1]) p11 = Product([f5, f0]) p12 = Product([f6, f5]) p13 = Product([f6, f5]).expand() p14 = Product([f1, f2]) p_tmp = Product([f1]) p_tmp.expand() p15 = Product([p_tmp, s0]) # print "\nTesting Products" # print "\np0: [] '%s'" % (p0) # print "\np1: %s '%s'" % (s0, p1) # print "\np2: %s * %s = '%s'" % (s0, s1, p2) # print "\np3: %s * %s * %s = '%s'" % (f1, s0, s1, p3) # print "\np4: %s * %s * %s = '%s'" % (s0, f2, s2, p4) # print "\np5: %s * %s * %s * %s * %s = '%s'" % (s0, f0, s1, f1, s2, p5) # print "\np6: %s * %s * %s = '%s'" % (s0, f3, s1, p6) # print "\np7: %s * %s * %s = '%s'" % (s0, f4, s1, p7) # print "\np8: %s * %s * %s * %s = '%s'" % (s0, f0, s2, f5, p8) # print "\np9: %s * %s = '%s'" % (s0, s1, p9) # print "\np10: %s * %s = '%s'" % (p0, p1, p10) # print "\np11: %s * %s = '%s'" % (f6, f1, p11) # print "\np12: %s * %s = '%s'" % (f6, f5, p12) # print "\np13: %s * %s = '%s'" % (f6, f5, p13) # print "\np14: %s * %s = '%s'" % (f1, f2, p14) self.assertEqual(repr(p0), "Product([FloatValue(%s)])" % f_0) self.assertEqual(repr(p1), 
"Product([Symbol('x', BASIS)])") self.assertEqual(repr(p3), "Product([FloatValue(%s), Symbol('x', BASIS), Symbol('y', GEO)])"\ % format["float"](3)) self.assertEqual(repr(p6), "Product([FloatValue(-%s), Symbol('x', BASIS), Symbol('y', GEO)])" % f_1) self.assertEqual(repr(p7), "Product([Symbol('x', BASIS), Symbol('y', GEO)])") self.assertEqual(repr(p8), "Product([Symbol('x', BASIS), Symbol('z', GEO)])") self.assertEqual(str(p2), 'x*y') self.assertEqual(str(p4), '%s' % f_0) self.assertEqual(str(p5), '-%s*x*y*z' % format["float"](6)) self.assertEqual(str(p6), ' - x*y') self.assertEqual(str(p7), 'x*y') self.assertEqual(str(p8), 'x*z') self.assertEqual(str(p9), 'x*y') self.assertEqual(p0.val, 0) self.assertEqual(str(p10), '%s' % f_0) self.assertEqual(str(p11), '%s' % f_1) self.assertEqual(str(p12), '-%s' % f_1) self.assertEqual(str(p13), '-%s' % f_1) self.assertEqual(repr(p14), "Product([FloatValue(%s)])" % f_0) self.assertEqual(repr(p14.expand()), "FloatValue(%s)" % f_0) self.assertEqual(p1 == p1, True) self.assertEqual(p1 == p7, False) self.assertEqual(p4 != p3, True) self.assertEqual(p2 == p9, True) self.assertEqual(p2 == p3, False) self.assertEqual(p0.ops(), 0) self.assertEqual(p1.ops(), 0) self.assertEqual(p2.ops(), 1) self.assertEqual(p3.ops(), 2) self.assertEqual(p4.ops(), 0) self.assertEqual(p5.ops(), 3) self.assertEqual(p6.ops(), 1) self.assertEqual(p7.ops(), 1) self.assertEqual(p8.ops(), 1) self.assertEqual(p9.ops(), 1) self.assertEqual(p10.ops(), 0) self.assertEqual(p14.ops(), 0) # Test hash l = [p3] d = {p3:0} p10 = Product([f1, s0, s1]) self.assertEqual(p3 in l, True) self.assertEqual(p3 in d, True) self.assertEqual(p10 in l, True) self.assertEqual(p10 in d, True) self.assertEqual(p2 in l, False) self.assertEqual(p2 in d, False) if __name__ == "__main__": # Run all returned tests runner = unittest.TextTestRunner() runner.run(TestProduct('testProduct')) 
ffc-1.6.0/test/unit/symbolics/testproductoperators.py000077500000000000000000000104051255571034100231150ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2010-01-06 # Last changed: 2010-02-01 # Pyhton modules import unittest import time # FFC modules from ffc.quadrature.reduce_operations import operation_count, expand_operations, reduce_operations from ffc.quadrature.symbolics import * from ffc.quadrature.sumobj import _group_fractions from ffc.cpp import format, set_float_formatting from ffc.parameters import FFC_PARAMETERS set_float_formatting(FFC_PARAMETERS['precision']) from ffc.log import error, push_level, pop_level, CRITICAL class TestProductOperators(unittest.TestCase): def testProductOperators(self): "Test binary operators" f_0 = format["float"](0) f_2 = format["float"](2) f_4 = format["float"](4) f0 = FloatValue(0.0) f1 = FloatValue(1.0) f2 = FloatValue(2.0) fm1 = FloatValue(-1.0) fm3 = FloatValue(-3.0) x = Symbol("x", GEO) y = Symbol("y", GEO) z = Symbol("z", GEO) p0 = Product([f2, x]) p1 = Product([x, y]) p2 = Product([f2, z]) p3 = Product([x, y, z]) S0 = Sum([x, y]) S1 = Sum([x, z]) F0 = Fraction(f2, x) F1 = Fraction(x, y) F2 = Fraction(x, S0) F3 = Fraction(x, y) F4 = Fraction(p0, y) # Test Product '+' self.assertEqual(str(p0 + f2), '(%s + %s*x)' % (f_2, f_2)) 
self.assertEqual(str(p0 + x), '%s*x' % format["float"](3)) self.assertEqual(str(p0 + y), '(y + %s*x)' % f_2) self.assertEqual(str(p0 + p0), '%s*x' % f_4) self.assertEqual(str(p0 + p1), '(%s*x + x*y)' % f_2) self.assertEqual(p0 + Product([fm1, x]), x) self.assertEqual(Product([fm1, x]) + x, f0) self.assertEqual(str(x + Product([fm1, x])), '%s' % f_0) self.assertEqual(str(p0 + S0), '(x + y + %s*x)' % f_2) self.assertEqual(str(p0 + F3), '(%s*x + x/y)' % f_2) # Test Product '-' self.assertEqual(str(p0 - f2), '(%s*x-%s)' % (f_2, f_2)) self.assertEqual(str(p0 - x), 'x') self.assertEqual(str(p0 - y), '(%s*x - y)' % f_2) self.assertEqual(str(p0 - p0), '%s' % f_0) self.assertEqual(str(p0 - p1), '(%s*x - x*y)' % f_2) self.assertEqual(str(p0 - S0), '(%s*x - (x + y))' % f_2) self.assertEqual(str(p0 - F3), '(%s*x - x/y)' % f_2) # Test Product '*', only need to test float, symbol and product. # Sum and fraction are handled by 'other' self.assertEqual(str(p0 * f0), '%s' % f_0) self.assertEqual(str(p0 * fm3), '-%s*x' % format["float"](6)) self.assertEqual(str(p0 * y), '%s*x*y' % f_2) self.assertEqual(str(p0 * p1), '%s*x*x*y' % f_2) # Test Product '/' self.assertEqual(str(Product([f0, x])/x), '%s' % f_0) self.assertEqual(str(p0/S0), '%s*x/(x + y)' % f_2) self.assertEqual(p1/y, x) self.assertEqual(p1/p2, Fraction(Product([p1, FloatValue(0.5)]), z)) self.assertEqual(p1/z, Fraction(p1, z)) self.assertEqual(p0/Product([f2, p1]), Fraction(f1, y)) self.assertEqual(p1/p0, Product([FloatValue(0.5), y])) self.assertEqual(p1/p1, f1) self.assertEqual(p1/p3, Fraction(f1, z)) self.assertEqual(str(p1/p3), '%s/z' % format["float"](1)) # Silence output push_level(CRITICAL) self.assertRaises(Exception, p0.__truediv__, f0) self.assertRaises(Exception, p0.__truediv__, F0) pop_level() if __name__ == "__main__": # Run all returned tests runner = unittest.TextTestRunner() runner.run(TestProductOperators('testProductOperators')) 
ffc-1.6.0/test/unit/symbolics/testrealexamples.py000077500000000000000000000145711255571034100221700ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2010-01-06 # Last changed: 2010-02-01 # Pyhton modules from __future__ import print_function import unittest import time # FFC modules from ffc.quadrature.symbolics import * from ffc.cpp import format, set_float_formatting from ffc.parameters import FFC_PARAMETERS set_float_formatting(FFC_PARAMETERS['precision']) class TestRealExamples(unittest.TestCase): def testRealExamples(self): # p = Product([ # Sum([ # Product([ # Symbol('w[5][0]', GEO), # Fraction( # Product([ # Symbol('FE0_C1_D01[ip][k]', BASIS), Symbol('Jinv_10', GEO) # ]), # Product([ # Symbol('w[5][0]', GEO), Symbol('w[5][0]', GEO) # ]) # ) # # ]), # Product([ # Symbol('w[5][0]', GEO), # Fraction( # Product([ # Symbol('FE0_C1_D01[ip][k]', BASIS), Symbol('Jinv_11', GEO) # ]), # Product([ # Symbol('w[5][0]', GEO), Symbol('w[5][0]', GEO) # ]) # ) # ]) # ]) # ]) # p = Product([ # Sum([ # Product([ # Symbol('x', BASIS), # Sum([ # Symbol('y', BASIS), # Product([ # Sum([ # Symbol('y', BASIS), # Product([ # Symbol('y', BASIS), # Symbol('z', GEO) # ]), # Symbol('y', BASIS) # ]) # ]), # Symbol('y', BASIS) # ]) # ]), # Product([ # Symbol('x', BASIS), # Sum([ # Product([ # Symbol('y', BASIS), # 
Symbol('z', GEO) # ]), # Symbol('y', BASIS) # ]) # ]) # ]) # ]) # p = Product([ # Sum([ # Product([ # Symbol('FE0_C1_D01[ip][j]', BASIS), # Product([ # Symbol('FE0_C1_D01[ip][k]', BASIS), # Sum([ # Symbol('w[4][0]', GEO) # ]), # Sum([ # Symbol('w[4][0]', GEO) # ]) # ]) # ]), # Product([ # Symbol('FE0_C1_D01[ip][j]', BASIS), # Symbol('FE0_C1_D01[ip][k]', BASIS) # ]) # ]) # ]) p = Product([ Symbol('FE0_C1_D01[ip][k]', BASIS), Sum([ Symbol('Jinv_10', GEO), Symbol('w[4][0]', GEO) ]), Sum([ Symbol('Jinv_10', GEO), Symbol('w[4][0]', GEO) ]) ]) # print "p: ", p # print p.expand() br = p.reduce_vartype(BASIS) # print # print br[0] # print br[1] be = p.expand().reduce_vartype(BASIS) # print # print be[0][0] # print be[0][1] if len(be) == 1: if be[0][0] == br[0]: if be[0][1] != br[1].expand(): # print "\np: ", repr(p) print("\nbe: ", repr(be[0][1])) print("\nbr: ", repr(br[1].expand())) print("\nbe: ", be[0][1]) print("\nbr: ", br[1].expand()) error("here1") if __name__ == "__main__": # Run all returned tests runner = unittest.TextTestRunner() runner.run(TestRealExamples('testRealExamples')) ffc-1.6.0/test/unit/symbolics/testreducegip.py000077500000000000000000000317611255571034100214550ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
# # First added: 2010-01-06 # Last changed: 2010-02-01 # Pyhton modules import unittest import time # FFC modules from ffc.quadrature.symbolics import * from ffc.cpp import format, set_float_formatting from ffc.parameters import FFC_PARAMETERS set_float_formatting(FFC_PARAMETERS['precision']) class TestReduceGIP(unittest.TestCase): def testReduceGIP(self): expr = Sum([ Product([ Symbol("F17", IP), Symbol("F17", IP), Symbol("F18", IP), Symbol("F18", IP), Symbol("F20", IP), Symbol("F3", IP), Symbol("W9", IP), Symbol("G0", GEO) ]), Product([ Symbol("F11", IP), Symbol("F11", IP), Symbol("F13", IP), Symbol("F13", IP), Symbol("F20", IP), Symbol("F3", IP), Symbol("W9", IP), Symbol("G1", GEO) ]), Product([ Symbol("F11", IP), Symbol("F11", IP), Symbol("F12", IP), Symbol("F13", IP), Symbol("F20", IP), Symbol("F3", IP), Symbol("W9", IP), Symbol("G2", GEO) ]), Product([ Symbol("F10", IP), Symbol("F11", IP), Symbol("F12", IP), Symbol("F20", IP), Symbol("F3", IP), Symbol("F8", IP), Symbol("W9", IP), Symbol("G2", GEO) ]), Product([ Symbol("F17", IP), Symbol("F18", IP), Symbol("F20", IP), Symbol("F3", IP), Symbol("F8", IP), Symbol("F9", IP), Symbol("W9", IP), Symbol("G3", GEO) ]), Product([ Symbol("F10", IP), Symbol("F17", IP), Symbol("F18", IP), Symbol("F20", IP), Symbol("F8", IP), Symbol("W9", IP), Symbol("G4", GEO) ]), Product([ Symbol("F10", IP), Symbol("F20", IP), Symbol("F8", IP), Symbol("F8", IP), Symbol("F9", IP), Symbol("W9", IP), Symbol("G4", GEO) ]), Product([ Symbol("F11", IP), Symbol("F13", IP), Symbol("F17", IP), Symbol("F18", IP), Symbol("F20", IP), Symbol("F3", IP), Symbol("W9", IP), Symbol("G2", GEO) ]), Product([ Symbol("F20", IP), Symbol("F8", IP), Symbol("F8", IP), Symbol("F9", IP), Symbol("F9", IP), Symbol("W9", IP), Symbol("G5", GEO) ]), Product([ Symbol("F11", IP), Symbol("F12", IP), Symbol("F17", IP), Symbol("F18", IP), Symbol("F20", IP), Symbol("W9", IP), Symbol("G6", GEO) ]), Product([ Symbol("F10", IP), Symbol("F10", IP), Symbol("F20", IP), Symbol("F3", 
IP), Symbol("F8", IP), Symbol("F8", IP), Symbol("W9", IP), Symbol("G1", GEO) ]), Product([ Symbol("F10", IP), Symbol("F10", IP), Symbol("F20", IP), Symbol("F8", IP), Symbol("F8", IP), Symbol("W9", IP), Symbol("G7", GEO) ]), Product([ Symbol("F17", IP), Symbol("F17", IP), Symbol("F18", IP), Symbol("F19", IP), Symbol("F20", IP), Symbol("F3", IP), Symbol("W9", IP), Symbol("G2", GEO) ]), Product([ Symbol("F11", IP), Symbol("F12", IP), Symbol("F17", IP), Symbol("F19", IP), Symbol("F20", IP), Symbol("W9", IP), Symbol("G4", GEO) ]), Product([ Symbol("F11", IP), Symbol("F11", IP), Symbol("F13", IP), Symbol("F13", IP), Symbol("F20", IP), Symbol("W9", IP), Symbol("G7", GEO) ]), Product([ Symbol("F10", IP), Symbol("F17", IP), Symbol("F19", IP), Symbol("F20", IP), Symbol("F3", IP), Symbol("F8", IP), Symbol("W9", IP), Symbol("G8", GEO) ]), Product([ Symbol("F17", IP), Symbol("F17", IP), Symbol("F18", IP), Symbol("F18", IP), Symbol("F20", IP), Symbol("W9", IP), Symbol("G5", GEO) ]), Product([ Symbol("F10", IP), Symbol("F17", IP), Symbol("F19", IP), Symbol("F20", IP), Symbol("F8", IP), Symbol("W9", IP), Symbol("G9", GEO) ]), Product([ Symbol("F10", IP), Symbol("F17", IP), Symbol("F18", IP), Symbol("F20", IP), Symbol("F3", IP), Symbol("F8", IP), Symbol("W9", IP), Symbol("G2", GEO) ]), Product([ Symbol("F17", IP), Symbol("F18", IP), Symbol("F20", IP), Symbol("F8", IP), Symbol("F9", IP), Symbol("W9", IP), Symbol("G6", GEO) ]), Product([ Symbol("F10", IP), Symbol("F11", IP), Symbol("F13", IP), Symbol("F20", IP), Symbol("F3", IP), Symbol("F8", IP), Symbol("W9", IP), Symbol("G8", GEO) ]), Product([ Symbol("F11", IP), Symbol("F11", IP), Symbol("F12", IP), Symbol("F13", IP), Symbol("F20", IP), Symbol("W9", IP), Symbol("G4", GEO) ]), Product([ Symbol("F10", IP), Symbol("F11", IP), Symbol("F13", IP), Symbol("F20", IP), Symbol("F8", IP), Symbol("W9", IP), Symbol("G9", GEO) ]), Product([ Symbol("F11", IP), Symbol("F13", IP), Symbol("F20", IP), Symbol("F3", IP), Symbol("F8", IP), Symbol("F9", 
IP), Symbol("W9", IP), Symbol("G2", GEO) ]), Product([ Symbol("F11", IP), Symbol("F12", IP), Symbol("F17", IP), Symbol("F18", IP), Symbol("F20", IP), Symbol("F3", IP), Symbol("W9", IP), Symbol("G3", GEO) ]), Product([ Symbol("F10", IP), Symbol("F20", IP), Symbol("F3", IP), Symbol("F8", IP), Symbol("F8", IP), Symbol("F9", IP), Symbol("W9", IP), Symbol("G2", GEO) ]), Product([ Symbol("F17", IP), Symbol("F19", IP), Symbol("F20", IP), Symbol("F8", IP), Symbol("F9", IP), Symbol("W9", IP), Symbol("G4", GEO) ]), Product([ Symbol("F11", IP), Symbol("F13", IP), Symbol("F17", IP), Symbol("F19", IP), Symbol("F20", IP), Symbol("W9", IP), Symbol("G9", GEO) ]), Product([ Symbol("F11", IP), Symbol("F13", IP), Symbol("F17", IP), Symbol("F18", IP), Symbol("F20", IP), Symbol("W9", IP), Symbol("G4", GEO) ]), Product([ Symbol("F11", IP), Symbol("F11", IP), Symbol("F12", IP), Symbol("F12", IP), Symbol("F20", IP), Symbol("F3", IP), Symbol("W9", IP), Symbol("G0", GEO) ]), Product([ Symbol("F17", IP), Symbol("F17", IP), Symbol("F19", IP), Symbol("F19", IP), Symbol("F20", IP), Symbol("W9", IP), Symbol("G7", GEO) ]), Product([ Symbol("F17", IP), Symbol("F17", IP), Symbol("F18", IP), Symbol("F19", IP), Symbol("F20", IP), Symbol("W9", IP), Symbol("G4", GEO) ]), Product([ Symbol("F20", IP), Symbol("F3", IP), Symbol("F8", IP), Symbol("F8", IP), Symbol("F9", IP), Symbol("F9", IP), Symbol("W9", IP), Symbol("G0", GEO) ]), Product([ Symbol("F11", IP), Symbol("F12", IP), Symbol("F20", IP), Symbol("F8", IP), Symbol("F9", IP), Symbol("W9", IP), Symbol("G6", GEO) ]), Product([ Symbol("F11", IP), Symbol("F13", IP), Symbol("F17", IP), Symbol("F19", IP), Symbol("F20", IP), Symbol("F3", IP), Symbol("W9", IP), Symbol("G8", GEO) ]), Product([ Symbol("F17", IP), Symbol("F19", IP), Symbol("F20", IP), Symbol("F3", IP), Symbol("F8", IP), Symbol("F9", IP), Symbol("W9", IP), Symbol("G2", GEO) ]), Product([ Symbol("F10", IP), Symbol("F11", IP), Symbol("F12", IP), Symbol("F20", IP), Symbol("F8", IP), Symbol("W9", 
IP), Symbol("G4", GEO) ]), Product([ Symbol("F11", IP), Symbol("F13", IP), Symbol("F20", IP), Symbol("F8", IP), Symbol("F9", IP), Symbol("W9", IP), Symbol("G4", GEO) ]), Product([ Symbol("F11", IP), Symbol("F11", IP), Symbol("F12", IP), Symbol("F12", IP), Symbol("F20", IP), Symbol("W9", IP), Symbol("G5", GEO) ]), Product([ Symbol("F11", IP), Symbol("F12", IP), Symbol("F20", IP), Symbol("F3", IP), Symbol("F8", IP), Symbol("F9", IP), Symbol("W9", IP), Symbol("G3", GEO) ]), Product([ Symbol("F17", IP), Symbol("F17", IP), Symbol("F19", IP), Symbol("F19", IP), Symbol("F20", IP), Symbol("F3", IP), Symbol("W9", IP), Symbol("G1", GEO) ]), Product([ Symbol("F11", IP), Symbol("F12", IP), Symbol("F17", IP), Symbol("F19", IP), Symbol("F20", IP), Symbol("F3", IP), Symbol("W9", IP), Symbol("G2", GEO) ]) ]) # print "\nReduceGIP" start = time.time() expr_exp = expr.expand() # print "ReduceGIP: time, expand() ", time.time() - start start = time.time() expr_red = expr_exp.reduce_ops() # print "ReduceGIP: time, reduce_ops(): ", time.time() - start # print "expr.ops(): ", expr.ops() # print "expr_exp.ops(): ", expr_exp.ops() # print "expr_red.ops(): ", expr_red.ops() # print "expr: ", expr # print "exp: ", expr_exp # print "red: ", expr_red W9 = 9 F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17, F18, F19, F20 = [0.123 * i for i in range(1,21)] G0, G1, G2, G3, G4, G5, G6, G7, G8, G9 = [2.64 + 1.0/i for i in range(20, 30)] self.assertAlmostEqual(eval(str(expr)), eval(str(expr_exp))) self.assertAlmostEqual(eval(str(expr)), eval(str(expr_red))) self.assertEqual(expr.ops(), 314) self.assertEqual(expr_exp.ops(), 314) self.assertEqual(expr_red.ops(), 120) if __name__ == "__main__": # Run all returned tests runner = unittest.TextTestRunner() runner.run(TestReduceGIP('testReduceGIP')) ffc-1.6.0/test/unit/symbolics/testreduceoperations.py000077500000000000000000000312021255571034100230470ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (C) 2010 Kristian 
B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2010-01-06 # Last changed: 2010-02-01 # Pyhton modules import unittest import time # FFC modules from ffc.quadrature.symbolics import * from ffc.cpp import format, set_float_formatting from ffc.parameters import FFC_PARAMETERS set_float_formatting(FFC_PARAMETERS['precision']) class TestReduceOperations(unittest.TestCase): def testReduceOperations(self): f_1 = format["float"](1) f_2 = format["float"](2) # Aux. 
variables f2 = FloatValue(2) f0_5 = FloatValue(0.5) f1 = FloatValue(1.0) fm1 = FloatValue(-1.0) x = Symbol("x", GEO) y = Symbol("y", GEO) z = Symbol("z", GEO) a = Symbol("a", GEO) b = Symbol("b", GEO) c = Symbol("c", GEO) d = Symbol("d", GEO) # Simple expand and reduce simple float and symbol objects fx2 = f2.expand() xx = x.expand() fr2 = fx2.reduce_ops() xr = xx.reduce_ops() # print "\nTest float and symbol" # print "f0: '%s'" %f2 # print "fx0: '%s'" %fx2 # print "fr0: '%s'" %fr2 # print # print "x: '%s'" %x # print "xx: '%s'" %xx # print "xr: '%s'" %xr self.assertEqual(f2, fr2) self.assertEqual(x, xr) # Test product p0 = f2*x p1 = y*x p2 = x*f2/y p3 = x*Sum([x, y]) px0 = p0.expand() px1 = p1.expand() pr0 = px0.reduce_ops() pr1 = px1.reduce_ops() # print "\nTest product" # print "p0: '%s'" %p0 # print "px0: '%s'" %px0 # print "pr0: '%s'" %pr0 # print # print "p1: '%s'" %p1 # print "px1: '%s'" %px1 # print "pr1: '%s'" %pr1 self.assertEqual(p0, pr0) self.assertEqual(p1, pr1) # Test fraction F0 = Fraction(p0, y) F1 = Fraction(x, p0) F2 = Fraction(p0, p1) F3 = Fraction(Sum([x*x, x*y]), y) F4 = Fraction(Sum([f2*x, x*y]), a) Fx0 = F0.expand() Fx1 = F1.expand() Fx2 = F2.expand() Fx3 = F3.expand() Fx4 = F4.expand() Fr0 = Fx0.reduce_ops() Fr1 = Fx1.reduce_ops() Fr2 = Fx2.reduce_ops() Fr3 = Fx3.reduce_ops() Fr4 = Fx4.reduce_ops() # print "\nTest fraction" # print "F0: '%s'" %F0 # print "Fx0: '%s'" %Fx0 # print "Fr0: '%s'" %Fr0 # print # print "F1: '%s'" %F1 # print "Fx1: '%s'" %Fx1 # print "Fr1: '%s'" %Fr1 # print # print "F2: '%s'" %F2 # print "Fx2: '%s'" %Fx2 # print "Fr2: '%s'" %Fr2 # print # print "F3: '%s'" %F3 # print "Fx3: '%s'" %Fx3 # print "Fr3: '%s'" %Fr3 # print # print "F4: '%s'" %F4 # print "Fx4: '%s'" %Fx4 # print "Fr4: '%s'" %Fr4 self.assertEqual(Fr0, F0) self.assertEqual(Fr1, f0_5) self.assertEqual(Fr2, Fraction(f2, y)) self.assertEqual(str(Fr3), "x*(%s + x/y)" % f_1) self.assertEqual(str(Fr4), "x*(%s + y)/a" % f_2) # Test sum # TODO: Here we might have to 
add additional tests S0 = Sum([x, y]) S1 = Sum([p0, p1]) S2 = Sum([x, p1]) S3 = Sum([p0, f2*y]) S4 = Sum([f2*p1, z*p1]) S5 = Sum([x, x*x, x*x*x]) S6 = Sum([a*x*x, b*x*x*x, c*x*x, d*x*x*x]) S7 = Sum([p0, p1, x*x, f2*z, y*z]) S8 = Sum([a*y, b*y, x*x*x*y, x*x*x*z]) S9 = Sum([a*y, b*y, c*y, x*x*x*y, f2*x*x, x*x*x*z]) S10 = Sum([f2*x*x*y, x*x*y*z]) S11 = Sum([f2*x*x*y*y, x*x*y*y*z]) S12 = Sum([f2*x*x*y*y, x*x*y*y*z, a*z, b*z, c*z]) S13 = Sum([Fraction(f1, x), Fraction(f1, y)]) S14 = Sum([Fraction(fm1, x), Fraction(fm1, y)]) S15 = Sum([Fraction(f2, x), Fraction(f2, x)]) S16 = Sum([Fraction(f2*x, y*z), Fraction(f0_5, y*z)]) S17 = Sum([(f2*x*y)/a, (x*y*z)/b]) S18 = Sum([(x*y)/a, (x*z)/a, f2/a, (f2*x*y)/a]) S19 = Sum([(f2*x)/a, (x*y)/a, z*x]) S20 = Product([ Sum([x, y]), Fraction(a, b), Fraction( Product([c, d]), z ) ]) S21 = Sum([a*x, b*x, c*x, x*y, x*z, f2*y, a*y, b*y, f2*z, a*z, b*z]) S22 = Sum([ FloatValue(0.5)*x/y, FloatValue(-0.5)*x/y ]) S23 = Sum([x*y*z, x*y*y*y*z*z*z, y*y*y*z*z*z*z, z*z*z*z*z]) Sx0 = S0.expand() Sx1 = S1.expand() Sx2 = S2.expand() Sx3 = S3.expand() Sx4 = S4.expand() Sx5 = S5.expand() Sx6 = S6.expand() Sx7 = S7.expand() Sx8 = S8.expand() Sx9 = S9.expand() Sx10 = S10.expand() Sx11 = S11.expand() Sx12 = S12.expand() Sx13 = S13.expand() Sx14 = S14.expand() Sx15 = S15.expand() Sx16 = S16.expand() Sx17 = S17.expand() Sx18 = S18.expand() Sx19 = S19.expand() Sx20 = S20.expand() Sx21 = S21.expand() Sx22 = S22.expand() Sx23 = S23.expand() Sr0 = Sx0.reduce_ops() Sr1 = Sx1.reduce_ops() Sr2 = Sx2.reduce_ops() Sr3 = Sx3.reduce_ops() Sr4 = Sx4.reduce_ops() Sr5 = Sx5.reduce_ops() Sr6 = Sx6.reduce_ops() Sr7 = Sx7.reduce_ops() Sr8 = Sx8.reduce_ops() Sr9 = Sx9.reduce_ops() Sr10 = Sx10.reduce_ops() Sr11 = Sx11.reduce_ops() Sr12 = Sx12.reduce_ops() Sr13 = Sx13.reduce_ops() Sr14 = Sx14.reduce_ops() Sr15 = Sx15.reduce_ops() Sr16 = Sx16.reduce_ops() Sr17 = Sx17.reduce_ops() Sr18 = Sx18.reduce_ops() Sr19 = Sx19.reduce_ops() Sr20 = Sx20.reduce_ops() Sr21 = Sx21.reduce_ops() 
Sr22 = Sx22.reduce_ops() Sr23 = Sx23.reduce_ops() # print "Test sum" # print "S0: '%s'" %S0 # print "Sx0: '%s'" %Sx0 # print "Sr0: '%s'" %Sr0 # print # print "S1: '%s'" %S1 # print "Sx1: '%s'" %Sx1 # print "Sr1: '%s'" %Sr1 # print # print "S2: '%s'" %S2 # print "Sx2: '%s'" %Sx2 # print "Sr2: '%s'" %Sr2 # print # print "S3: '%s'" %S3 # print "Sx3: '%s'" %Sx3 # print "Sr3: '%s'" %Sr3 # print # print "S4: '%s'" %S4 # print "Sx4: '%s'" %Sx4 # print "Sr4: '%s'" %Sr4 # print # print "S5: '%s'" %S5 # print "Sx5: '%s'" %Sx5 # print "Sr5: '%s'" %Sr5 # print # print "S6: '%s'" %S6 # print "Sx6: '%s'" %Sx6 # print "Sr6: '%s'" %Sr6 # print # print "S7: '%s'" %S7 # print "Sx7: '%s'" %Sx7 # print "Sr7: '%s'" %Sr7 # print # print "S8: '%s'" %S8 # print "Sx8: '%s'" %Sx8 # print "Sr8: '%s'" %Sr8 # print # print "S9: '%s'" %S9 # print "Sx9: '%s'" %Sx9 # print "Sr9: '%s'" %Sr9 # print # print "S10: '%s'" %S10 # print "Sx10: '%s'" %Sx10 # print "Sr10: '%s'" %Sr10 # print # print "S11: '%s'" %S11 # print "Sx11: '%s'" %Sx11 # print "Sr11: '%s'" %Sr11 # print # print "S12: '%s'" %S12 # print "Sx12: '%s'" %Sx12 # print "Sr12: '%s'" %Sr12 # print # print "S13: '%s'" %S13 # print "Sx13: '%s'" %Sx13 # print "Sr13: '%s'" %Sr13 # print # print "S14: '%s'" %S14 # print "Sx14: '%s'" %Sx14 # print "Sr14: '%s'" %Sr14 # print # print "S15: '%s'" %S15 # print "Sx15: '%s'" %Sx15 # print "Sr15: '%s'" %Sr15 # print # print "S16: '%s'" %S16 # print "Sx16: '%s'" %Sx16 # print "Sr16: '%s'" %Sr16 # print # print "S17: '%s'" %S17 # print "Sx17: '%s'" %Sx17 # print "Sr17: '%s'" %Sr17 # print # print "S18: '%s'" %S18 # print "Sx18: '%s'" %Sx18 # print "Sr18: '%s'" %Sr18 # print # print "S19: '%s'" %S19 # print "Sx19: '%s'" %Sx19 # print "Sr19: '%s'" %Sr19 # print # print "S20: '%s'" %S20 # print "Sx20: '%s'" %Sx20 # print "Sr20: '%s'" %Sr20 # print # print "S21: '%s'" %S21 # print "Sx21: '%s'" %Sx21 # print "Sr21: '%s'" %Sr21 # print # print "S22: '%s'" %S22 # print "Sx22: '%s'" %Sx22 # print "Sr22: '%s'" 
%Sr22 # print # print "S23: '%s'" %S23 # print "Sx23: '%s'" %Sx23 # print "Sr23: '%s'" %Sr23 self.assertEqual(Sr0, S0) self.assertEqual(str(Sr1), "x*(%s + y)" % f_2) # TODO: Should this be (x + x*y)? self.assertEqual(str(Sr2), "x*(%s + y)" % f_1) # self.assertEqual(str(Sr2), "(x + x*y)") self.assertEqual(str(Sr3), "%s*(x + y)" % f_2) self.assertEqual(str(Sr4), "x*y*(%s + z)" % f_2) self.assertEqual(str(Sr5), "x*(%s + x*(%s + x))" % (f_1, f_1)) self.assertEqual(str(Sr6), "x*x*(a + c + x*(b + d))") self.assertEqual(str(Sr7), "(x*(%s + x + y) + z*(%s + y))" % (f_2, f_2)) self.assertEqual(str(Sr8), "(x*x*x*(y + z) + y*(a + b))") self.assertEqual(str(Sr9), "(x*x*(%s + x*(y + z)) + y*(a + b + c))" % f_2) self.assertEqual(str(Sr10), "x*x*y*(%s + z)" % f_2) self.assertEqual(str(Sr11), "x*x*y*y*(%s + z)" % f_2) self.assertEqual(str(Sr12), "(x*x*y*y*(%s + z) + z*(a + b + c))" % f_2) self.assertEqual(str(Sr13), "(%s/x + %s/y)" % (f_1, f_1)) self.assertEqual(str(Sr14), "(-%s/x-%s/y)" % (f_1, f_1)) self.assertEqual(str(Sr15), "%s/x" % format["float"](4)) self.assertEqual(str(Sr16), "(%s + %s*x)/(y*z)" % (format["float"](0.5), f_2)) self.assertEqual(str(Sr17), "x*y*(%s/a + z/b)" % f_2) self.assertEqual(str(Sr18), "(%s + x*(z + %s*y))/a" % (f_2, format["float"](3))) self.assertEqual(str(Sr19), "x*(z + (%s + y)/a)" % f_2) self.assertEqual(str(Sr20), "a*c*d*(x + y)/(b*z)") self.assertEqual(str(Sr21), "(x*(a + b + c + y + z) + y*(%s + a + b) + z*(%s + a + b))" % (f_2, f_2)) self.assertEqual(str(Sr22), "%s" % format["float"](0)) self.assertEqual(str(Sr23), "(x*y*z + z*z*z*(y*y*y*(x + z) + z*z))") self.assertEqual(S0.ops(), 1) self.assertEqual(Sr0.ops(), 1) self.assertEqual(S1.ops(), 3) self.assertEqual(Sr1.ops(), 2) self.assertEqual(S2.ops(), 2) self.assertEqual(Sr2.ops(), 2) self.assertEqual(S3.ops(), 3) self.assertEqual(Sr3.ops(), 2) self.assertEqual(S4.ops(), 5) self.assertEqual(Sr4.ops(), 3) self.assertEqual(S5.ops(), 5) self.assertEqual(Sr5.ops(), 4) self.assertEqual(S6.ops(), 
13) self.assertEqual(Sr6.ops(), 6) self.assertEqual(S7.ops(), 9) self.assertEqual(Sr7.ops(), 6) self.assertEqual(S8.ops(), 11) self.assertEqual(Sr8.ops(), 7) self.assertEqual(S9.ops(), 16) self.assertEqual(Sr9.ops(), 9) self.assertEqual(S10.ops(), 7) self.assertEqual(Sr10.ops(), 4) self.assertEqual(S11.ops(), 9) self.assertEqual(Sr11.ops(), 5) self.assertEqual(S12.ops(), 15) self.assertEqual(Sr12.ops(), 9) self.assertEqual(S13.ops(), 3) self.assertEqual(Sr13.ops(), 3) self.assertEqual(S14.ops(), 3) self.assertEqual(Sr14.ops(), 3) self.assertEqual(S15.ops(), 3) self.assertEqual(Sr15.ops(), 1) self.assertEqual(S16.ops(), 6) self.assertEqual(Sr16.ops(), 4) self.assertEqual(S17.ops(), 7) self.assertEqual(Sr17.ops(), 5) self.assertEqual(S18.ops(), 11) self.assertEqual(Sr18.ops(), 5) self.assertEqual(S19.ops(), 7) self.assertEqual(Sr19.ops(), 4) self.assertEqual(S20.ops(), 6) self.assertEqual(Sr20.ops(), 6) self.assertEqual(S21.ops(), 21) self.assertEqual(Sr21.ops(), 13) self.assertEqual(S23.ops(), 21) self.assertEqual(Sr23.ops(), 12) if __name__ == "__main__": # Run all returned tests runner = unittest.TextTestRunner() runner.run(TestReduceOperations('testReduceOperations')) ffc-1.6.0/test/unit/symbolics/testreducevartype.py000077500000000000000000000150721255571034100223650ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. 
If not, see . # # First added: 2010-01-06 # Last changed: 2010-02-01 # Pyhton modules import unittest import time # FFC modules from ffc.quadrature.symbolics import * from ffc.cpp import format, set_float_formatting from ffc.parameters import FFC_PARAMETERS set_float_formatting(FFC_PARAMETERS['precision']) class TestReduceVarType(unittest.TestCase): def testReduceVarType(self): f1 = FloatValue(1) f2 = FloatValue(2) f3 = FloatValue(3) f5 = FloatValue(5) fm4 = FloatValue(-4) B0 = Symbol("B0",BASIS) B1 = Symbol("B1", BASIS) Bm4 = Product([fm4, B1]) B5 = Product([f5, B0]) I0 = Symbol("I0", IP) I1 = Symbol("I1", IP) I2 = Symbol("I2", IP) I5 = Product([f5, I0]) G0 = Symbol("G0", GEO) G1 = Symbol("G1", GEO) G2 = Symbol("G2", GEO) G3 = Product([f3, G0]) C0 = Symbol("C0", CONST) C2 = Product([f2, C0]) p0 = Product([B0,I5]) p1 = Product([B0,B1]) S0 = Sum([B0, I5]) S1 = Sum([p0, p1]) S2 = Sum([B0, B1]) S3 = Sum([B0, p0]) S4 = Sum([f5, p0]) S5 = Sum([I0, G0]) F0 = Fraction(B0,I5).expand() F1 = Fraction(p1,I5).expand() F2 = Fraction(G3,S2).expand() F3 = Fraction(G3,S3).expand() F4 = Fraction(I1, Sum([I1, I0])) F5 = Fraction(S5, I1) F6 = Fraction(I0, Sum([ Fraction(Sum([I0,I1]), Sum([G0,G1])), Fraction(Sum([I1,I2]), Sum([G1,G2])), ])) r0 = B0.reduce_vartype(BASIS) r1 = B0.reduce_vartype(CONST) rp0 = p0.reduce_vartype(BASIS) rp1 = p0.reduce_vartype(IP) rp2 = p1.reduce_vartype(BASIS) rp3 = p1.reduce_vartype(GEO) rs0 = S0.reduce_vartype(BASIS) rs1 = S0.reduce_vartype(IP) rs2 = S1.reduce_vartype(BASIS) rs3 = S4.reduce_vartype(BASIS) rs4 = S4.reduce_vartype(CONST) rf0 = F0.reduce_vartype(BASIS) rf1 = F1.reduce_vartype(BASIS) rf2 = F0.reduce_vartype(IP) rf3 = F2.reduce_vartype(BASIS) rf4 = F3.reduce_vartype(BASIS) rf5 = F4.reduce_vartype(IP) rf6 = F5.reduce_vartype(IP) rf7 = F6.reduce_vartype(IP) # print # print "%s, red(BASIS): ('%s', '%s')" %(B0, r0[0][0], r0[0][1]) # print "%s, red(CONST): ('%s', '%s')" %(B0, r1[0][0], r1[0][1]) # print "\n%s, red(BASIS): ('%s', '%s')" %(p0, 
rp0[0][0], rp0[0][1]) # print "%s, red(IP): ('%s', '%s')" %(p0, rp1[0][0], rp1[0][1]) # print "%s, red(BASIS): ('%s', '%s')" %(p1, rp2[0][0], rp2[0][1]) # print "%s, red(CONST): ('%s', '%s')" %(p1, rp3[0][0], rp3[0][1]) # print "\n%s, red(BASIS): ('%s', '%s')" %(S0, rs0[0][0], rs0[0][1]) # print "%s, red(IP): ('%s', '%s')" %(S0, rs1[0][0], rs1[0][1]) # print "%s, red(BASIS): '%s', '%s'" %(S1, rs2[0][0], rs2[0][1]) # print "%s, red(BASIS): '%s', '%s'" %(S4, rs3[0][0], rs3[0][1]) # print "%s, red(BASIS): '%s'" %(S4, rs4[0][0]) # print "\nrf0: %s, red(BASIS): ('%s', '%s')" %(F0, rf0[0][0], rf0[0][1]) # print "rf1: %s, red(BASIS): ('%s', '%s')" %(F1, rf1[0][0], rf1[0][1]) # print "rf2: %s, red(IP): ('%s', '%s')" %(F0, rf2[0][0], rf2[0][1]) # print "rf3: %s, red(BASIS): ('%s', '%s')" %(F2, rf3[0][0], rf3[0][1]) # print "rf4: %s, red(BASIS): ('%s', '%s')" %(F3, rf4[0][0], rf4[0][1]) # print "rf5: %s, red(IP): ('%s', '%s')" %(F4, rf5[0][0], rf5[0][1]) # print "rf6: %s, red(IP): ('%s', '%s') + ('%s', '%s')" %(F5, rf6[0][0], rf6[0][1], rf6[1][0], rf6[1][1]) # print "rf7: %s, red(IP): ('%s', '%s')" %(F6, rf7[0][0], rf7[0][1]) self.assertEqual([(B0, f1)], r0) self.assertEqual([((), B0)], r1) self.assertEqual([(B0, I5)], rp0) self.assertEqual([(I0, B5)], rp1) self.assertEqual([(p1, f1)], rp2) self.assertEqual([((), p1)], rp3) self.assertEqual(((), I5), rs0[0]) self.assertEqual((B0, f1), rs0[1]) self.assertEqual((I0, f5), rs1[1]) self.assertEqual(((), B0), rs1[0]) self.assertEqual(( Product([B0, B1]), f1), rs2[1]) self.assertEqual((B0, I5), rs2[0]) self.assertEqual(((), f5), rs3[0]) self.assertEqual((B0, I5), rs3[1]) self.assertEqual((f5, Sum([f1, Product([B0, I0])])), rs4[0]) self.assertEqual([(B0, Fraction(FloatValue(0.2), I0))], rf0) self.assertEqual([( Product([B0, B1]), Fraction(FloatValue(0.2), I0))], rf1) self.assertEqual( [( Fraction(f1, I0), Product([FloatValue(0.2), B0]) )], rf2) self.assertEqual([(Fraction(f1, S2), G3)], rf3) self.assertEqual( [( Fraction(f1, B0), 
Fraction( G3, Sum([I5, f1])))], rf4) self.assertEqual(F4, rf5[0][0]) self.assertEqual(FloatValue(1), rf5[0][1]) self.assertEqual(Fraction(I0,I1), rf6[1][0]) self.assertEqual(f1, rf6[1][1]) self.assertEqual(Fraction(f1,I1), rf6[0][0]) self.assertEqual(G0, rf6[0][1]) self.assertEqual(F6, rf7[0][0]) self.assertEqual(f1, rf7[0][1]) expr = Sum([Symbol('W1', GEO), Fraction(Symbol('det', GEO), Sum([Symbol('F0', IP), Symbol('K_11', GEO)]))]) red = expr.expand().reduce_vartype(IP) vals = [] for ip in red: ip_dec, geo = ip if ip_dec and geo: vals.append(Product([ip_dec, geo])) elif geo: vals.append(geo) elif ip_dec: vals.append(ip_dec) comb = Sum(vals).expand() K_11 = 1.4 F0 = 1.5 W1 = 1.9 det = 2.1 self.assertAlmostEqual(eval(str(expr)), eval(str(comb))) if __name__ == "__main__": # Run all returned tests runner = unittest.TextTestRunner() runner.run(TestReduceVarType('testReduceVarType')) ffc-1.6.0/test/unit/symbolics/testsum.py000077500000000000000000000077641255571034100203200ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
# # First added: 2010-01-06 # Last changed: 2010-02-01 # Pyhton modules import unittest import time # FFC modules from ffc.quadrature.symbolics import * from ffc.cpp import format, set_float_formatting from ffc.parameters import FFC_PARAMETERS set_float_formatting(FFC_PARAMETERS['precision']) class TestSum(unittest.TestCase): def testSum(self): "Test simple sum instance." f_0 = format["float"](0) f_1 = format["float"](1) f_2 = format["float"](2) f_3 = format["float"](3) f0 = FloatValue(-2.0) f1 = FloatValue(3.0) f2 = FloatValue(0) s0 = Symbol("x", BASIS) s1 = Symbol("y", GEO) s2 = Symbol("z", GEO) S0 = Sum([]) S1 = Sum([s0]) S2 = Sum([s0, s1]) S3 = Sum([s0, s0]) S4 = Sum([f0, s0]) S5 = Sum([s0, f0, s0]) S6 = Sum([s0, f0, s0, f1]) S7 = Sum([s0, f0, s1, f2]) S8 = Sum([s0, f1, s0]) S9 = Sum([f0, f0, f0, f1, f1, s1]) S10 = Sum([s1, s0]) # print "\nTesting Sum" # print "\nS0: [] '%s'" % (S0) # print "\nS1: %s = '%s'" %(s0, S1) # print "\nS2: %s + %s = '%s'" %(s0, s1, S2) # print "\nS3: %s + %s = '%s'" %(s0, s0, S3) # print "\nS4: %s + %s = '%s'" %(f0, s0, S4) # print "\nS5: %s + %s + %s = '%s'" %(s0, f0, s0, S5) # print "\nS6: %s + %s + %s + %s = '%s'" %(s0, f0, s0, f1, S6) # print "\nS7: %s + %s + %s + %s = '%s'" %(s0, f0, s1, f2, S7) # print "\nS8: %s + %s + %s = '%s'" %(s0, f1, s0, S8) # print "\nS9: %s + %s + %s + %s + %s + %s = '%s'" %(f0, f0, f0, f1, f1, s1, S9) # print "\nS10: %s + %s = '%s'" %(s1, s0, S10) self.assertEqual(repr(S0), "Sum([FloatValue(%s)])" % f_0) self.assertEqual(S0.t, CONST) self.assertEqual(repr(S1), "Sum([Symbol('x', BASIS)])") # self.assertEqual(repr(S4), "Sum([Symbol('x', BASIS), FloatValue(-2)])") self.assertEqual(repr(S4), "Sum([FloatValue(-%s), Symbol('x', BASIS)])" % f_2) self.assertEqual(repr(S9), "Sum([Symbol('y', GEO)])") self.assertEqual(str(S2), "(x + y)") self.assertEqual(str(S3), "(x + x)") self.assertEqual(str(S5), "(x + x-%s)" % f_2) self.assertEqual(str(S6), "(%s + x + x)" % f_1) self.assertEqual(str(S7), "(x + y-%s)" % f_2) 
self.assertEqual(str(S8), "(%s + x + x)" % f_3) self.assertEqual(str(S9), "y") self.assertEqual(S2 == S2, True) self.assertEqual(S2 == S3, False) self.assertEqual(S5 != S6, True) self.assertEqual(S2 == S10, True) self.assertEqual(S0.ops(), 0) self.assertEqual(S1.ops(), 0) self.assertEqual(S2.ops(), 1) self.assertEqual(S3.ops(), 1) self.assertEqual(S4.ops(), 1) self.assertEqual(S5.ops(), 2) self.assertEqual(S6.ops(), 2) self.assertEqual(S7.ops(), 2) self.assertEqual(S8.ops(), 2) self.assertEqual(S9.ops(), 0) # Test hash l = [S2] d = {S2:0} self.assertEqual(S2 in l, True) self.assertEqual(S2 in d, True) self.assertEqual(S10 in l, True) self.assertEqual(S10 in d, True) if __name__ == "__main__": # Run all returned tests runner = unittest.TextTestRunner() runner.run(TestSum('testSum')) ffc-1.6.0/test/unit/symbolics/testsumoperators.py000077500000000000000000000071741255571034100222520ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
# # First added: 2010-01-06 # Last changed: 2010-02-01 # Pyhton modules import unittest import time # FFC modules from ffc.quadrature.reduce_operations import operation_count, expand_operations, reduce_operations from ffc.quadrature.symbolics import * from ffc.quadrature.sumobj import _group_fractions from ffc.cpp import format, set_float_formatting from ffc.parameters import FFC_PARAMETERS set_float_formatting(FFC_PARAMETERS['precision']) from ffc.log import error, push_level, pop_level, CRITICAL class TestSumOperators(unittest.TestCase): def testSumOperators(self): "Test binary operators" f_0_5 = format["float"](0.5) f_1 = format["float"](1) f_2 = format["float"](2) f_3 = format["float"](3) f_6 = format["float"](6) f2 = FloatValue(2.0) fm3 = FloatValue(-3.0) x = Symbol("x", GEO) y = Symbol("y", GEO) z = Symbol("z", GEO) p0 = Product([f2, x]) p1 = Product([x, y]) S0 = Sum([x, y]) S1 = Sum([x, z]) F0 = Fraction(p0, y) # Test Sum '+' self.assertEqual(str(S0 + f2), '(%s + x + y)' % f_2) self.assertEqual(str(S0 + x), '(x + x + y)') self.assertEqual(str(S0 + p0), '(x + y + %s*x)' % f_2) self.assertEqual(str(S0 + S0), '(x + x + y + y)') self.assertEqual(str(S0 + F0), '(x + y + %s*x/y)' % f_2) # Test Sum '-' self.assertEqual(str(S0 - f2), '(x + y-%s)' % f_2) self.assertEqual(str(S0 - fm3), '(x + y + %s)' % f_3) self.assertEqual(str(S0 - x), '(x + y - x)') self.assertEqual(str(S0 - p0), '(x + y-%s*x)' % f_2) self.assertEqual(str(S0 - Product([fm3, p0])), '(x + y + %s*x)' % f_6) self.assertEqual(str(S0 - S0), '(x + y - (x + y))') self.assertEqual(str(S0 - F0), '(x + y - %s*x/y)' % f_2) # Test Sum '*' self.assertEqual(str(S0 * f2), '(%s*x + %s*y)' % (f_2, f_2)) self.assertEqual(str(S0 * x), '(x*x + x*y)') self.assertEqual(str(S0 * p0), '(%s*x*x + %s*x*y)' % (f_2, f_2)) self.assertEqual(str(S0 * S0), '(%s*x*y + x*x + y*y)' % f_2) self.assertEqual(str(S0 * F0), '(%s*x + %s*x*x/y)' % (f_2, f_2)) # Test Sum '/' self.assertEqual(str(S0 / f2), '(%s*x + %s*y)' % (f_0_5, f_0_5)) 
self.assertEqual(str(S0 / x), '(%s + y/x)' % f_1) self.assertEqual(str(S0 / p0), '(%s + %s*y/x)' % (f_0_5, f_0_5)) self.assertEqual(str(S0 / p1), '(%s/x + %s/y)' % (f_1, f_1)) self.assertEqual(str(S0 / S0), '(x + y)/(x + y)') self.assertEqual(str(S0 / S1), '(x + y)/(x + z)') # Silence output push_level(CRITICAL) self.assertRaises(Exception, S0.__truediv__, FloatValue(0)) self.assertRaises(Exception, S0.__truediv__, F0) pop_level() if __name__ == "__main__": # Run all returned tests runner = unittest.TextTestRunner() runner.run(TestSumOperators('testSumOperators')) ffc-1.6.0/test/unit/symbolics/testsymbol.py000077500000000000000000000047701255571034100210130ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . # # First added: 2010-01-06 # Last changed: 2010-02-01 # Pyhton modules import unittest import time # FFC modules from ffc.quadrature.symbolics import * from ffc.cpp import format, set_float_formatting from ffc.parameters import FFC_PARAMETERS set_float_formatting(FFC_PARAMETERS['precision']) class TestSymbol(unittest.TestCase): def testSymbol(self): "Test simple symbol instance." 
s0 = Symbol("x", BASIS) s1 = Symbol("y", IP) s2 = Symbol("z", GEO) s3 = Symbol("z", GEO) s4 = Symbol("z", IP) # print "\nTesting Symbols" # print "s0: '%s'" %s0 # print "s1: '%s'" %s1 # print "s2: '%s'" %s2 # print "s3: '%s'" %s3 # print "s4: '%s'" %s4 self.assertEqual(repr(s0), "Symbol('x', BASIS)") self.assertEqual(repr(s1), "Symbol('y', IP)") self.assertEqual(repr(s2), "Symbol('z', GEO)") self.assertEqual(repr(s4), "Symbol('z', IP)") self.assertEqual(s2 == s3, True) self.assertEqual(s2 == s1, False) self.assertEqual(s2 == s4, False) self.assertEqual(s2 != s3, False) self.assertEqual(s2 != s1, True) self.assertEqual(s0 < s1, True) self.assertEqual(s4 > s1, True) self.assertEqual(s0.ops(), 0) self.assertEqual(s1.ops(), 0) self.assertEqual(s2.ops(), 0) self.assertEqual(s3.ops(), 0) self.assertEqual(s4.ops(), 0) # Test hash l = [s0] d = {s0:0} s5 = Symbol('x', BASIS) self.assertEqual(s0 in l, True) self.assertEqual(s0 in d, True) self.assertEqual(s5 in l, True) self.assertEqual(s5 in d, True) if __name__ == "__main__": # Run all returned tests runner = unittest.TextTestRunner() runner.run(TestSymbol('testSymbol')) ffc-1.6.0/test/unit/symbolics/testsymboloperators.py000077500000000000000000000074561255571034100227560ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFC. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see . 
# # First added: 2010-01-06 # Last changed: 2010-02-01 # Pyhton modules import unittest import time # FFC modules from ffc.quadrature.reduce_operations import operation_count, expand_operations, reduce_operations from ffc.quadrature.symbolics import * from ffc.quadrature.sumobj import _group_fractions from ffc.cpp import format, set_float_formatting from ffc.parameters import FFC_PARAMETERS set_float_formatting(FFC_PARAMETERS['precision']) from ffc.log import error, push_level, pop_level, CRITICAL class TestSymbolOperators(unittest.TestCase): def testSymbolOperators(self): "Test binary operators" f_0 = format["float"](0) f_1 = format["float"](1) f_2 = format["float"](2) f_3 = format["float"](3) f_0_5 = format["float"](0.5) f0 = FloatValue(0.0) f2 = FloatValue(2.0) fm1 = FloatValue(-1.0) fm3 = FloatValue(-3.0) x = Symbol("x", GEO) y = Symbol("y", GEO) z = Symbol("z", GEO) p0 = Product([f2, x]) p1 = Product([x, y]) p2 = Product([f2, z]) p3 = Product([y, x, z]) S0 = Sum([x, y]) S1 = Sum([x, z]) F0 = Fraction(f2, y) F1 = Fraction(x, y) F2 = Fraction(x, S0) F3 = Fraction(x, y) F4 = Fraction(p0, y) F5 = Fraction(fm3, y) # Test Symbol '+' self.assertEqual(str(x + f2), '(%s + x)' % f_2) self.assertEqual(str(x + x), '%s*x' % f_2) self.assertEqual(str(x + y), '(x + y)') self.assertEqual(str(x + p0), '%s*x' % f_3) self.assertEqual(str(x + p1), '(x + x*y)') self.assertEqual(str(x + S0), '(x + x + y)') self.assertEqual(str(x + F0), '(x + %s/y)' % f_2) # Test Symbol '-' self.assertEqual(str(x - f2), '(x-%s)' % f_2) self.assertEqual(str(x - x), '%s' % f_0) self.assertEqual(str(x - y), '(x - y)') self.assertEqual(str(x - p0), ' - x') self.assertEqual(str(x - p1), '(x - x*y)') self.assertEqual(str(x - S0), '(x - (x + y))') self.assertEqual(str(x - F5), '(x - -%s/y)' % f_3) # Test Symbol '*', only need to test float, symbol and product. 
Sum and # fraction are handled by 'other' self.assertEqual(str(x*f2), '%s*x' % f_2) self.assertEqual(str(x*y), 'x*y') self.assertEqual(str(x*p1), 'x*x*y') # Test Symbol '/' self.assertEqual(str(x/f2), '%s*x' % f_0_5) self.assertEqual(str(x/x), '%s' % f_1) self.assertEqual(str(x/y), 'x/y') self.assertEqual(str(x/S0), 'x/(x + y)') self.assertEqual(str(x/p0), '%s' % f_0_5) self.assertEqual(str(y/p1), '%s/x' % f_1) self.assertEqual(str(z/p0), '%s*z/x' % f_0_5) self.assertEqual(str(z/p1), 'z/(x*y)') # Silence output push_level(CRITICAL) self.assertRaises(Exception, x.__truediv__, F0) self.assertRaises(Exception, y.__truediv__, FloatValue(0)) pop_level() if __name__ == "__main__": # Run all returned tests runner = unittest.TextTestRunner() runner.run(TestSymbolOperators('testSymbolOperators')) ffc-1.6.0/test/unit/test.py000066400000000000000000000002071255571034100155450ustar00rootroot00000000000000from misc.test import * from symbolics.test import * from evaluate_basis.test import * if __name__ == "__main__": unittest.main() ffc-1.6.0/ufc-merge-into-ffc/000077500000000000000000000000001255571034100156345ustar00rootroot00000000000000ffc-1.6.0/ufc-merge-into-ffc/COPYING.GPL-2000066400000000000000000000432541255571034100174570ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. 
(Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. 
The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. 
You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. 
Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. 
However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. 
If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. 
If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. 
ffc-1.6.0/ufc-merge-into-ffc/COPYING.LGPL000066400000000000000000000167431255571034100174370ustar00rootroot00000000000000 GNU LESSER GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. This version of the GNU Lesser General Public License incorporates the terms and conditions of version 3 of the GNU General Public License, supplemented by the additional permissions listed below. 0. Additional Definitions. As used herein, "this License" refers to version 3 of the GNU Lesser General Public License, and the "GNU GPL" refers to version 3 of the GNU General Public License. "The Library" refers to a covered work governed by this License, other than an Application or a Combined Work as defined below. An "Application" is any work that makes use of an interface provided by the Library, but which is not otherwise based on the Library. Defining a subclass of a class defined by the Library is deemed a mode of using an interface provided by the Library. A "Combined Work" is a work produced by combining or linking an Application with the Library. The particular version of the Library with which the Combined Work was made is also called the "Linked Version". The "Minimal Corresponding Source" for a Combined Work means the Corresponding Source for the Combined Work, excluding any source code for portions of the Combined Work that, considered in isolation, are based on the Application, and not on the Linked Version. The "Corresponding Application Code" for a Combined Work means the object code and/or source code for the Application, including any data and utility programs needed for reproducing the Combined Work from the Application, but excluding the System Libraries of the Combined Work. 1. Exception to Section 3 of the GNU GPL. 
You may convey a covered work under sections 3 and 4 of this License without being bound by section 3 of the GNU GPL. 2. Conveying Modified Versions. If you modify a copy of the Library, and, in your modifications, a facility refers to a function or data to be supplied by an Application that uses the facility (other than as an argument passed when the facility is invoked), then you may convey a copy of the modified version: a) under this License, provided that you make a good faith effort to ensure that, in the event an Application does not supply the function or data, the facility still operates, and performs whatever part of its purpose remains meaningful, or b) under the GNU GPL, with none of the additional permissions of this License applicable to that copy. 3. Object Code Incorporating Material from Library Header Files. The object code form of an Application may incorporate material from a header file that is part of the Library. You may convey such object code under terms of your choice, provided that, if the incorporated material is not limited to numerical parameters, data structure layouts and accessors, or small macros, inline functions and templates (ten or fewer lines in length), you do both of the following: a) Give prominent notice with each copy of the object code that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the object code with a copy of the GNU GPL and this license document. 4. Combined Works. You may convey a Combined Work under terms of your choice that, taken together, effectively do not restrict modification of the portions of the Library contained in the Combined Work and reverse engineering for debugging such modifications, if you also do each of the following: a) Give prominent notice with each copy of the Combined Work that the Library is used in it and that the Library and its use are covered by this License. 
b) Accompany the Combined Work with a copy of the GNU GPL and this license document. c) For a Combined Work that displays copyright notices during execution, include the copyright notice for the Library among these notices, as well as a reference directing the user to the copies of the GNU GPL and this license document. d) Do one of the following: 0) Convey the Minimal Corresponding Source under the terms of this License, and the Corresponding Application Code in a form suitable for, and under terms that permit, the user to recombine or relink the Application with a modified version of the Linked Version to produce a modified Combined Work, in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source. 1) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (a) uses at run time a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked Version. e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the GNU GPL, and only to the extent that such information is necessary to install and execute a modified version of the Combined Work produced by recombining or relinking the Application with a modified version of the Linked Version. (If you use option 4d0, the Installation Information must accompany the Minimal Corresponding Source and Corresponding Application Code. If you use option 4d1, you must provide the Installation Information in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.) 5. Combined Libraries. 
You may place library facilities that are a work based on the Library side by side in a single library together with other library facilities that are not Applications and are not covered by this License, and convey such a combined library under terms of your choice, if you do both of the following: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities, conveyed under the terms of this License. b) Give prominent notice with the combined library that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 6. Revised Versions of the GNU Lesser General Public License. The Free Software Foundation may publish revised and/or new versions of the GNU Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library as you received it specifies that a certain numbered version of the GNU Lesser General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that published version or of any later version published by the Free Software Foundation. If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation. If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library. 
ffc-1.6.0/ufc-merge-into-ffc/LICENSE000066400000000000000000000036301255571034100166430ustar00rootroot00000000000000The header file ufc.h and the UFC Python utils are released into the public domain. ------------------------------------------------------------------------- Other files with different licenses attached: Files: doc/manual/code/Poisson.ufl doc/manual/fenicsmanual.cls Copyright: © 2004-2007 Anders Logg License: This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License, version 2, as published by the Free Software Foundation. This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Files: doc/manual/algorithm.sty Copyright: © 1994 Peter Williams License: This style file is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This style file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this style file; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. Files: doc/manual/eps/insertion.eps Copyright: © 2004 artofcode LLC, Benicia, CA. License: This file was drawn with Inkscape by Anders Logg and is released into the public domain. 
ffc-1.6.0/ufc-merge-into-ffc/README.merge000066400000000000000000000011241255571034100176100ustar00rootroot00000000000000Merge of UFC into FFC 2014-02-18 performed using this recipe: http://stackoverflow.com/questions/1683531/how-to-import-existing-git-repository-into-another cd cd ffc git remote add ufc ../ufc git fetch ufc git checkout -b ufc-merge-into-ffc ufc/master mkdir ufc-merge-into-ffc git mv ufc-merge-into-ffc git commit -a git checkout master git merge ufc-merge-into-ffc git commit -a git remote rm ufc git branch -d ufc-merge-into-ffc This is followed by manually moving back files from the directory ufc-merge-into-ffc into the top level directory (in progress). ffc-1.6.0/ufc-merge-into-ffc/README.rst000066400000000000000000000036531255571034100173320ustar00rootroot00000000000000========== UFC 2.3.0+ ========== Introduction ============ UFC (Unified Form-assembly Code) is a unified framework for finite element assembly. More precisely, it defines a fixed interface for communicating low level routines (functions) for evaluating and assembling finite element variational forms. The UFC interface consists of a single header file ufc.h that specifies a C++ interface that must be implemented by code that complies with the UFC specification. Examples of form compilers that support the UFC interface are FFC and SyFi. For more information, visit the FEniCS web page at http://www.fenicsproject.org or refer to the UFC Specification and User Manual in doc/manual/ufc-user-manual.pdf in this source tree. Installation ============ To install UFC, run:: cmake . make make install This installs the header file ufc.h and a small set of Python utilities (templates) for generating UFC code. Files will be installed under the default prefix. The installation prefix may be optionally specified, for example:: cmake -DCMAKE_INSTALL_PREFIX=$HOME/local . make install Alternatively, just copy the single header file src/ufc/ufc.h into a suitable include directory. 
If you do not want to build and install the python extenstion module of UFC, needed by, e.g., PyDOLFIN, you can write:: cmake -DCMAKE_INSTALL_PREFIX=~/local -D UFC_ENABLE_PYTHON:BOOL=OFF . make make install For more options, it is convenient to use a CMake GUI. To use a GUI (if installed) for an out-of-source build, simply type:: mkdir build cd build cmake-gui ../ make make install AUTHORS ======= A list of authors can be found in the file AUTHORS. License ======= Details about the license can be found the file LICENSE. Feedback ======== Feedback, comments and suggestions should be sent to fenics-ufc@lists.launchpad.net For questions and bug reports, visit the UFC Launchpad page: http://www.launchpad.net/ufc ffc-1.6.0/ufc-merge-into-ffc/doc/000077500000000000000000000000001255571034100164015ustar00rootroot00000000000000ffc-1.6.0/ufc-merge-into-ffc/doc/manual/000077500000000000000000000000001255571034100176565ustar00rootroot00000000000000ffc-1.6.0/ufc-merge-into-ffc/doc/manual/Makefile000066400000000000000000000010531255571034100213150ustar00rootroot00000000000000FILENAME=ufc-user-manual all: latex $(FILENAME).tex final: latex $(FILENAME).tex bibtex $(FILENAME) latex $(FILENAME).tex makeindex $(FILENAME) latex $(FILENAME) dvips -P pdf -o $(FILENAME).ps $(FILENAME).dvi ps2pdf $(FILENAME).ps $(FILENAME).pdf clean: rm -f $(FILENAME).aux rm -f $(FILENAME).idx rm -f $(FILENAME).log rm -f $(FILENAME).out rm -f $(FILENAME).toc rm -f $(FILENAME).bbl rm -f $(FILENAME).blg rm -f $(FILENAME).ilg rm -f $(FILENAME).ind purge: clean rm -f $(FILENAME).dvi rm -f $(FILENAME).ps rm -f $(FILENAME).pdf ffc-1.6.0/ufc-merge-into-ffc/doc/manual/algorithm.sty000066400000000000000000000056101255571034100224070ustar00rootroot00000000000000% ALGORITHM STYLE -- Released 8 April 1996 % for LaTeX-2e % Copyright -- 1994 Peter Williams % % E-mail pwil3058@bigpond.net.au % % This style file is free software; you can redistribute it and/or % modify it under the terms of the GNU Lesser General Public 
% License as published by the Free Software Foundation; either % version 2 of the License, or (at your option) any later version. % % This style file is distributed in the hope that it will be useful, % but WITHOUT ANY WARRANTY; without even the implied warranty of % MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU % Lesser General Public License for more details. % % You should have received a copy of the GNU Lesser General Public % License along with this style file; if not, write to the % Free Software Foundation, Inc., 59 Temple Place - Suite 330, % Boston, MA 02111-1307, USA. % \NeedsTeXFormat{LaTeX2e} \ProvidesPackage{algorithm} \typeout{Document Style `algorithm' - floating environment} \RequirePackage{float} \RequirePackage{ifthen} \newcommand{\ALG@within}{nothing} \newboolean{ALG@within} \setboolean{ALG@within}{false} \newcommand{\ALG@floatstyle}{ruled} \newcommand{\ALG@name}{Algorithm} \newcommand{\listalgorithmname}{List of \ALG@name s} % Declare Options % first appearance \DeclareOption{plain}{ \renewcommand{\ALG@floatstyle}{plain} } \DeclareOption{ruled}{ \renewcommand{\ALG@floatstyle}{ruled} } \DeclareOption{boxed}{ \renewcommand{\ALG@floatstyle}{boxed} } % then numbering convention \DeclareOption{part}{ \renewcommand{\ALG@within}{part} \setboolean{ALG@within}{true} } \DeclareOption{chapter}{ \renewcommand{\ALG@within}{chapter} \setboolean{ALG@within}{true} } \DeclareOption{section}{ \renewcommand{\ALG@within}{section} \setboolean{ALG@within}{true} } \DeclareOption{subsection}{ \renewcommand{\ALG@within}{subsection} \setboolean{ALG@within}{true} } \DeclareOption{subsubsection}{ \renewcommand{\ALG@within}{subsubsection} \setboolean{ALG@within}{true} } \DeclareOption{nothing}{ \renewcommand{\ALG@within}{nothing} \setboolean{ALG@within}{true} } \DeclareOption*{\edef\ALG@name{\CurrentOption}} % ALGORITHM % \ProcessOptions \floatstyle{\ALG@floatstyle} \ifthenelse{\boolean{ALG@within}}{ \ifthenelse{\equal{\ALG@within}{part}} 
{\newfloat{algorithm}{htbp}{loa}[part]}{} \ifthenelse{\equal{\ALG@within}{chapter}} {\newfloat{algorithm}{htbp}{loa}[chapter]}{} \ifthenelse{\equal{\ALG@within}{section}} {\newfloat{algorithm}{htbp}{loa}[section]}{} \ifthenelse{\equal{\ALG@within}{subsection}} {\newfloat{algorithm}{htbp}{loa}[subsection]}{} \ifthenelse{\equal{\ALG@within}{subsubsection}} {\newfloat{algorithm}{htbp}{loa}[subsubsection]}{} \ifthenelse{\equal{\ALG@within}{nothing}} {\newfloat{algorithm}{htbp}{loa}}{} }{ \newfloat{algorithm}{htbp}{loa} } \floatname{algorithm}{\ALG@name} \newcommand{\listofalgorithms}{\listof{algorithm}{\listalgorithmname}} ffc-1.6.0/ufc-merge-into-ffc/doc/manual/bibliography.bib000066400000000000000000000130161255571034100230100ustar00rootroot00000000000000@misc{www:dolfin, title = {{DOLFIN}}, author = {J. Hoffman and J. Jansson and A. Logg and G. N. Wells}, year = {2006}, note = {URL: \url{http://www.fenics.org/dolfin/}} } @misc{www:ffc, author = {A. Logg}, title = {{FFC}}, year = {2007}, note = {URL: \url{http://www.fenics.org/ffc/}}, } @misc{www:syfi, author = {M. Aln\ae{}s and K--A Mardal}, title = {{S}y{F}i}, year = {2007}, note = {URL: \url{http://www.fenics.org/syfi/}}, } @misc{www:fenics, author = {J. Hoffman and J. Jansson and C. Johnson and M. G. Knepley and R. C. Kirby and A. Logg and L. R. Scott and G. N. Wells}, title = {{FE}ni{CS}}, year = {2006}, note = {URL: \url{http://www.fenics.org/}}, } @misc{www:sundance, author = {Kevin Long}, title = {Sundance}, year = {2006}, note = {URL: \url{http://software.sandia.gov/sundance/}} } @misc{www:deal.II, author = {Wolfgang Bangerth and Ralf Hartmann and Guido Kanschat}, title = {{\tt deal.{I}{I}} {D}ifferential {E}quations {A}nalysis {L}ibrary}, year = {2006}, note = {URL: \url{http://www.dealii.org/}} } @misc{www:petsc, author = {Satish Balay and Kris Buschelman and William D. Gropp and Dinesh Kaushik and Matthew G. Knepley and Lois Curfman McInnes and Barry F. 
Smith and Hong Zhang}, title = {{PETS}c}, year = {2006}, note = {URL: \url{http://www.mcs.anl.gov/petsc/}} } @misc{www:trilinos, title = {Trilinos}, note = {URL: \url{http://software.sandia.gov/trilinos/}}, } @manual{www:diffpack, title = {{Diffpack}}, author = {A. M. Bruaset and H. P. Langtangen and others}, year = {2006}, note = {URL: \url{http://www.diffpack.com/}} } @article{logg:article:07, author = {R. C. Kirby and M. G. Knepley and A. Logg and L. R. Scott}, title = {Optimizing the Evaluation of Finite Element Matrices}, journal = {SIAM J. Sci. Comput.}, year = {2005}, volume = {27}, number = {3}, pages = {741--758}, issn = {1064-8275} } @article{logg:article:09, author = {R. C. Kirby and A. Logg and L. R. Scott and A. R. Terrel}, title = {Topological Optimization of the Evaluation of Finite Element Ma trices}, journal = {{SIAM} J. Sci. Comput.}, year = {2006}, volume = {28}, number = {1}, pages = {224--240}, issn = {1064-8275} } @article{logg:article:10, author = "R. C. Kirby and A. Logg", title = "A Compiler for Variational Forms", journal = "{ACM} Transactions on Mathematical Software", volume = "32", number = "3", pages = "417--444", year = "2006", accepted = "13 November 2005", abstract = "As a key step towards a complete automation of the finite elem ent method, we present a new algorithm for automatic and efficient evaluati on of multilinear variational forms. The algorithm has been implement ed in the form of a compiler, the FEniCS Form Compiler FFC. We present be nchmark results for a series of standard variational forms, including t he incompressible Navier-Stokes equations and linear elasticity. T he speedup compared to the standard quadrature-based approach is impressiv e; in some cases the speedup is as large as a factor 1000.", issn = {0098-3500}, } @article{logg:article:11, author = "R. C. Kirby and A. 
Logg", title = "Efficient Compilation of a Class of Variational Forms", journal = "{ACM} Transactions on Mathematical Software", volume = "33", number = "3", year = "2007", accepted = "31 August 2006", abstract = "We investigate the compilation of general multilinear variatio nal forms over affines simplices and prove a representation theorem for the representation of the element tensor (element stiffness mat rix) as the contraction of a constant reference tensor and a geometr y tensor that accounts for geometry and variable coefficients. Ba sed on this representation theorem, we design an algorithm for effi cient pretabulation of the reference tensor. The new algorithm has b een implemented in the FEniCS Form Compiler~(FFC) and improves on a previous loop-based implementation by several orders of magnitu de, thus shortening compile-times and development cycles for users of FFC.", year = "2007", issn = {0098-3500}, } @book{ZieTay67, author = {O. C. Zienkiewicz and R. L. Taylor and J. Z. Zhu}, title = {The Finite Element Method --- Its Basis and Fundamentals, 6th e dition}, publisher = {Elsevier}, year = {2005, first published in 1967}, } @book{Hug87, author = {T. J. R. Hughes}, title = {The Finite Element Method: Linear Static and Dynamic Finite Ele ment Analysis}, publisher = {Prentice-Hall}, year = {1987}, } @book{Lan99, author = {H. P. Langtangen}, title = {Computational Partial Differential Equations -- Numerical Metho ds and Diffpack Programming}, publisher = {Springer}, year = {1999}, series = {Lecture Notes in Computational Science and Engineering}, } @book{Cia78, author = {P. G. 
Ciarlet}, title = {The Finite Element Method for Elliptic Problems}, publisher = {North-Holland, Amsterdam, New York, Oxford}, year = {1978}, } ffc-1.6.0/ufc-merge-into-ffc/doc/manual/chapters/000077500000000000000000000000001255571034100214675ustar00rootroot00000000000000ffc-1.6.0/ufc-merge-into-ffc/doc/manual/chapters/assembly.tex000066400000000000000000000375241255571034100240430ustar00rootroot00000000000000\chapter{Finite element assembly} \label{sec:assembly} \newtheorem{example}{\small{\sc{Example}}}[section] In this section, we present a general algorithm for assembly of finite element variational forms and define the concepts that the UFC interface is based on. \section{Finite Element Discretization} \label{sec:fem} \subsection{The Finite Element} \index{finite element} A finite element is mathematically defined as a triplet consisting of a polygon, a polynomial function space, and a set of linear functionals, see~\cite{Cia78}. Given that the dimension of the function space and the number of the (linearly independent) linear functionals are equal, the finite element is uniquely defined. Hence, we will refer to a finite element as a collection of \begin{itemize} \item a polygon $K$, \item a polynomial space $\mathcal{P}_K$ on $K$, \item a set of linearly independent linear functionals, the \emph{degrees of freedom}, $L_i : \mathcal{P}_K \rightarrow \R, \, i = 1, 2, \ldots, n$. \end{itemize} \subsection{Variational Forms} \index{variational form} Consider the weighted Poisson problem $- \nabla \cdot (w \nabla u) = f$ with Dirichlet boundary conditions on a domain $\Omega \subset \R^d$. Multiplying by a test function $v \in V_h$ and integrating by parts, one obtains the variational problem \begin{equation} \label{eq:weightedpoisson} \int_{\Omega} w \nabla v \cdot \nabla u \dx = \int_{\Omega} v f \dx \quad \forall v \in V_h, \end{equation} for $u \in V_h$. 
If $w, f \in W_h$ for some discrete finite element space $W_h$ (which may be different from $V_h$), we may thus write~(\ref{eq:weightedpoisson}) as \begin{equation} a(v, u; w) = L(v; f) \quad \forall v \in V_h, \end{equation} where the trilinear form $a : V_h \times V_h \times W_h \rightarrow \R$ is given by \begin{equation} a(v, u; w) = \int_{\Omega} w \nabla v \cdot \nabla u \dx \end{equation} and the bilinear form $L : V_h \times W_h \rightarrow R$ is given by \begin{equation} L(v; f) = \int_{\Omega} v f \dx. \end{equation} Note here that $a$ is \emph{bilinear} for any given fixed $w \in W_h$ and $L$ is \emph{linear} for any given fixed $f \in W_h$. In general, we shall be concerned with the discretization of finite element variational forms of general arity~$r + n > 0$, \begin{equation} \label{eq:variationalform} a : V_h^1 \times V_h^2 \times \cdots \times V_h^r \times W_h^1 \times W_h^2 \times \cdots \times W_h^n \rightarrow \R, \end{equation} defined on the product space $V_h^1 \times V_h^2 \times \cdots \times V_h^r \times W_h^1 \times W_h^2 \times \cdots \times W_h^n$ of two sets $\{V_h^j\}_{j=1}^r, \{W_h^j\}_{j=1}^n$ of discrete finite element function spaces on $\Omega$. We refer to $(v_1, v_2, \ldots, v_r) \in V_h^1 \times V_h^2 \times \cdots \times V_h^r$ as \emph{primary arguments}, and to $(w_1, w_2, \ldots, w_n) \in W_h^1 \times W_h^2 \times \cdots \times W_h^n$ as \emph{coefficients} and write \begin{equation} a = a(v_1, \ldots, v_r; w_1, \ldots, w_n). \label{eq:gen_form} \end{equation} In the simplest case, all function spaces are equal but there are many important examples, such as mixed methods, where the arguments come from different function spaces. 
\subsection{Discretization} \label{sec:Discretization} To discretize the form $a$, we introduce bases $\{\phi_i^1\}_{i=1}^{N^1}, \{\phi_i^2\}_{i=1}^{N^2}, \ldots, \{\phi_i^r\}_{i=1}^{N^r}$ for the function spaces $V_h^1, V_h^2, \ldots, V_h^r$ respectively and let $i = (i_1, i_2, \ldots, i_r)$ be a multiindex of length $|i| = r$. The form $a$ then defines a rank~$r$ tensor given by \begin{equation} \label{eq:tensor} A_i = a(\phi_{i_1}^1, \phi_{i_2}^2, \ldots, \phi_{i_r}^r; w_1, w_2, \ldots, w_n) \quad \forall i \in \mathcal{I}, \end{equation} where $\mathcal{I}$ is the index set \begin{equation} \begin{split} & \mathcal{I} = \prod_{j=1}^r[1,|V^j_h|] = \\ & \{(1,1,\ldots,1), (1,1,\ldots,2), \ldots, (N^1,N^2,\ldots,N^r)\}. \end{split} \end{equation} We refer to the tensor~$A$ as the \emph{discrete operator} generated by the form~$a$ and the particular choice of basis functions. For any given form of arity~$r + n$, the tensor~$A$ is a (typically sparse) tensor of rank~$r$ and dimension $|V_h^1| \times |V_h^2| \times \ldots \times |V_h^r| = N^1 \times N^2 \times \ldots \times N^r$. \index{global tensor} Typically, the rank $r$ is 0, 1, or 2. When $r = 0$, the tensor $A$ is a scalar (a tensor of rank zero), when $r = 1$, the tensor $A$ is a vector (the ``load vector'') and when $r = 2$, the tensor $A$ is a matrix (the ``stiffness matrix''). Forms of higher arity also appear, though they are rarely assembled as a higher-dimensional sparse tensor. Note here that we consider the functions $w_1, w_2, \ldots, w_n$ as fixed in the sense that the discrete operator~$A$ is computed for a given set of functions, which we refer to as \emph{coefficients}. As an example, consider again the variational problem~(\ref{eq:weightedpoisson}) for the weighted Poisson's equation. For the trilinear form~$a$, the rank is $r = 2$ and the number of coefficients is $n = 1$, while for the linear form~$L$, the rank is $r = 1$ and the number of coefficients is $n = 1$. 
We may also choose to directly compute the \emph{action} of the form $a$ obtained by assembling a vector from the form \begin{equation} a(v_1; w_1, w2) = \int_{\Omega} w_1 \nabla v_1 \cdot \nabla w_2 \dx, \end{equation} where now $r = 1$ and $n = 2$. We list below a few other examples to illustrate the notation. \begin{example} \label{example:div} Our first example is related to the divergence constraint in fluid flow. Let the form~$a$ be given by \begin{equation} a(q, u) = \int_{\Omega} q \nabla \cdot u \dx, \quad q\in V_h^1, \quad u\in V_h^2, \end{equation} where $V_h^1$ is a space of scalar-valued functions and where $V_h^2$ is a space of vector-valued functions. The form $a : V_h^1 \times V_h^2 \rightarrow \R$ has two primary arguments and thus $r = 2$. Furthermore, the form does not depend on any coefficients and thus $n=0$. \end{example} \begin{example} \label{example:linearconv} Another common form in fluid flow (with variable density) is \begin{equation} a(v,u;w,\varrho) = \int_{\Omega} v \, \varrho \, w \cdot \nabla u \dx. \end{equation} Here, $v\in V_h^1,\ u \in V_h^2,\ w\in W_h^1, \ \varrho \in W_h^2$, where $V_h^1$, $V_h^2$, and $W_h^1$ are spaces of vector-valued functions, while $W_h^2$ is a space of scalar-valued functions. The form takes four arguments, where two of the arguments are coefficients, \begin{equation} a : V_h^1 \times V_h^2 \times W_h^1 \times W_h^2 \rightarrow \R. \end{equation} Hence, $r=2$ and $n=2$. \end{example} \begin{example} The $H^1(\Omega)$ norm of the error $e = u - u_h$ squared is \begin{equation} a(;u, u_h) = \int_{\Omega} (u - u_h)^2 + |\nabla (u - u_h)|^2 \dx. \end{equation} The form takes two arguments and both are coefficients, \begin{equation} a : W_h^1 \times W_h^2 \rightarrow \R. \end{equation} Hence, $r=0$ and $n=2$. \end{example} \section{Finite Element Assembly} \index{assembly} The standard algorithm for computing the global sparse tensor~$A$ is known as \emph{assembly}, see~\cite{ZieTay67,Hug87}. 
By this algorithm, the tensor~$A$ may be computed by assembling (summing) the contributions from the local entities of a finite element mesh. To express this algorithm for assembly of the global sparse tensor~$A$ for a general finite element variational form of arity~$r$, we introduce the following notation and assumptions. Let $\mathcal{T} = \{K\}$ be a set of disjoint \emph{cells} (a triangulation) partitioning the domain $\Omega = \cup_{K\in\mathcal{T}} K$. Further, let $\partial_e \mathcal{T}$ denote the set of \emph{exterior facets} (the set of cell facets incident with the boundary $\partial \Omega$), and let $\partial_i \mathcal{T}$ denote the set of $\emph{interior facets}$ (the set of cell facets non-incident with the boundary $\partial \Omega$). For each discrete function space $V_h^j, \, j=1,2,\ldots,r$, we assume that the global basis~$\{\phi_i^j\}_{i=1}^{N^j}$ is obtained by patching together local function spaces $\mathcal{P}_K^j$ on each cell~$K$ as determined by a local-to-global mapping. We shall further assume that the variational form~(\ref{eq:variationalform}) may be expressed as a sum of integrals over the cells~$\mathcal{T}$, the exterior facets~$\partial_e \mathcal{T}$ and the interior facets~$\partial_i \mathcal{T}$. We shall allow integrals expressed on disjoint subsets $\mathcal{T} = \cup_{k=1}^{n_c} \mathcal{T}_k$, $\partial_e \mathcal{T} = \cup_{k=1}^{n_e} \partial_e \mathcal{T}_k$ and $\partial_i \mathcal{T} = \cup_{k=1}^{n_i} \partial_i \mathcal{T}_k$ respectively. We thus assume that the form $a$ is given by \begin{equation} \begin{split} & a(v_1, \ldots, v_r; w_1, \ldots, w_n) = \\ &\ \ \ \sum_{k=1}^{n_c} \sum_{K\in\mathcal{T}_k} \int_{K} I^c_k(v_1, \ldots, v_r; w_1, \ldots w_n) \dx \\ &+ \sum_{k=1}^{n_e} \sum_{S\in\partial_e\mathcal{T}_k} \int_{S} I^e_k(v_1, \ldots, v_r; w_1, \ldots, w_n) \ds \\ &+ \sum_{k=1}^{n_i} \sum_{S\in\partial_i\mathcal{T}_k} \int_{S} I^i_k(v_1, \ldots, v_r; w_1, \ldots, w_n) \ds. 
\end{split} \label{eq:form_integrals} \end{equation} We refer to an integral over a cell~$K$ as a \emph{cell integral}, an integral over an exterior facet~$S$ as an \emph{exterior facet integral} (typically used to implement Neumann and Robin type boundary conditions), and to an integral over an interior facet~$S$ as an \emph{interior facet integral} (typically used in discontinuous Galerkin methods). For simplicity, we consider here initially assembly of the global sparse tensor~$A$ corresponding to a form~$a$ given by a single integral over all cells $\mathcal{T}$, and later extend to the general case where we must also account for contributions from several cell integrals, interior facet integrals and exterior facet integrals. We thus consider the form \begin{equation} \begin{split} &a(v_1, \ldots, v_r; w_1, \ldots, w_n) = \\ & \ \ \ \sum_{K\in\mathcal{T}} \int_K I^c(v_1, \ldots, v_r; w_1, \ldots, w_n) \dx, \end{split} \end{equation} for which the global sparse tensor~$A$ is given by \begin{equation} A_i = \sum_{K\in\mathcal{T}} \int_K I^c(\phi^1_{i_1}, \ldots, \phi^r_{i_r}; w_1, \ldots, w_n) \dx. \end{equation} To see how to compute the tensor $A$ by summing the local contributions from each cell~$K$, we let $n^j_K = |\mathcal{P}^j_K|$ denote the dimension of the local finite element space on $K$ for the $j$th primary argument $v_j \in V_h^j$ for $j = 1,2,\ldots,r$. Furthermore, let \begin{equation} \iota_K^j : [1,n_K^j] \rightarrow [1,N^j] \label{eq:iota_K} \end{equation} denote the local-to-global mapping for~$V_h^j$, that is, on any given $K\in\mathcal{T}$, the mapping $\iota_K^j$ maps the number of a local degree of freedom (or, equivalently, local basis function) to the number of the corresponding global degree of freedom (or, equivalently, global basis function). 
We then define for each $K \in \mathcal{T}$ the collective local-to-global mapping $\iota_K : \mathcal{I}_K \rightarrow \mathcal{I}$ by \begin{equation} \iota_K(i) = (\iota_K^1(i_1),\iota_K^2(i_2),\ldots,\iota_K^r(i_r)) \quad \forall i \in \mathcal{I}_K, \end{equation} where $\mathcal{I}_K$ is the index set \begin{equation} \begin{split} & \mathcal{I}_K = \prod_{j=1}^r[1,|\mathcal{P}_K^j|] \\ & = \{(1,1,\ldots,1), (1,1,\ldots,2), \ldots, (n_K^1,n_K^2,\ldots,n_K^r)\}. \end{split} \end{equation} Furthermore, for each $V_h^j$ we let $\{\phi^{K,j}_i\}_{i=1}^{n_K^j}$ denote the restriction to an element $K$ of the subset of the basis $\{\phi_i^j\}_{i=1}^{N^j} \subset \mathcal{P}_K^j$ of $V_h^j$ supported on $K$. We may now compute~$A$ by summing the contributions from the local cells, \begin{equation} \begin{split} A_i &= \sum_{K\in\mathcal{T}_i} \int_K I^c(\phi_{i_1}^1, \ldots, \phi_{i_r}^r; w_1, \ldots, w_n) \dx \\ &= \sum_{K\in\mathcal{T}_i} \int_K I^c(\phi_{(\iota_K^1)^{-1}(i_1)}^{K,1}, \ldots, \phi_{(\iota_K^r)^{-1}(i_r)}^{K,r}; w_1, \ldots, w_n) \dx \\ &= \sum_{K\in\mathcal{T}_i} A^K_{\iota_K^{-1}(i)}, \end{split} \end{equation} where $A^K$ is the local \emph{cell tensor} on cell $K$ (the ``element stiffness matrix''), given by \begin{equation} A^K_i = \int_K I^c(\phi_{i_1}^{K,1}, \ldots, \phi_{i_r}^{K,r}; w_1, \ldots, w_n) \dx, \\ \end{equation} and where $\mathcal{T}_i$ denotes the set of cells on which all basis functions $\phi_{i_1}^1, \phi_{i_2}^2, \ldots, \phi_{i_r}^r$ are supported. Similarly, we may sum the local contributions from the exterior and interior facets in the form of local \emph{exterior facet tensors} and \emph{interior facet tensors}. 
\index{cell tensor} \index{exterior facet tensor} \index{interior facet tensor} \begin{figure}[htbp] \begin{center} \psfrag{i0}{\hspace{-0.5cm}$\iota_K^1(1)$} \psfrag{i1}{\hspace{-0.5cm}$\iota_K^1(2)$} \psfrag{i2}{\hspace{-0.5cm}$\iota_K^1(3)$} \psfrag{j0}{\hspace{-0.3cm}$\iota_K^2(1)$} \psfrag{j1}{\hspace{-0.5cm}$\iota_K^2(2)$} \psfrag{j2}{\hspace{-0.1cm}$\iota_K^2(3)$} \psfrag{A21}{$A^K_{32}$} \psfrag{1}{$1$} \psfrag{2}{$2$} \psfrag{3}{$3$} \includegraphics[height=3in]{eps/insertion.eps} \caption{Adding the entries of a cell tensor~$A^K$ to the global tensor~$A$ using the local-to-global mapping $\iota_K$, illustrated here for a rank two tensor (a matrix).} \label{fig:insertion} \end{center} \end{figure} In Algorithm~\ref{alg:assembly}, we present a general algorithm for assembling the contributions from the local cell, exterior facet and interior facet tensors into a global sparse tensor. In all cases, we iterate over all entities (cells, exterior or interior facets), compute the local cell tensor $A^K$ (or exterior/interior facet tensor $A^S$) and add it to the global sparse tensor as determined by the local-to-global mapping, see~Figure~\ref{fig:insertion}. 
\begin{algorithm} \footnotesize $A = 0$ \\ (i) \emph{Assemble contributions from all cells} \\ \textbf{for each} $K \in \mathcal{T}$ \\ \\ \tab \textbf{for} $j = 1,2,\ldots,r$: \\ \tab\tab Tabulate the local-to-global mapping $\iota_K^j$ \\ \\ \tab \textbf{for} $j = 1,2,\ldots,n$: \\ \tab\tab Extract the values of $w_j$ on $K$ \\ \\ \tab Take $0 \leq k \leq n_c$ such that $K \in \mathcal{T}_k$ \\ \tab Tabulate the cell tensor $A^K$ for $I^c_k$ \\ \tab Add $A^K_i$ to $A_{\iota_K^1(i_1), \iota_K^2(i_2), \ldots, \iota_K^r(i_r)}$ for $i\in I_K$ \\ \\ (ii) \emph{Assemble contributions from all exterior facets} \\ \textbf{for each} $S \in \partial_e\mathcal{T}$ \\ \\ \tab \textbf{for} $j = 1,2,\ldots,r$: \\ \tab\tab Tabulate the local-to-global mapping $\iota_{K(S)}^j$ \\ \\ \tab \textbf{for} $j = 1,2,\ldots,n$: \\ \tab\tab Extract the values of $w_j$ on $K(S)$ \\ \\ \tab Take $0 \leq k \leq n_e$ such that $S \in \partial_e \mathcal{T}_k$ \\ \tab Tabulate the exterior facet tensor $A^S$ for $I^e_k$ \\ \tab Add $A^S_i$ to $A_{\iota_{K(S)}^1(i_1), \iota_{K(S)}^2(i_2), \ldots, \iota_{K(S)}^r(i_r)}$ for $i\in I_{K(S)}$ \\ \\ \\ (iii) \emph{Assemble contributions from all interior facets} \\ \textbf{for each} $S \in \partial_i\mathcal{T}$ \\ \\ \tab \textbf{for} $j = 1,2,\ldots,r$: \\ \tab\tab Tabulate the local-to-global mapping $\iota_{K(S)}^j$ \\ \\ \tab \textbf{for} $j = 1,2,\ldots,n$: \\ \tab\tab Extract the values of $w_j$ on $K(S)$ \\ \\ \tab Take $0 \leq k \leq n_i$ such that $S \in \partial_i \mathcal{T}_k$ \\ \tab Tabulate the interior facet tensor $A^S$ for $I^i_k$ \\ \tab Add $A^S_i$ to $A_{\iota_{K(S)}^1(i_1), \iota_{K(S)}^2(i_2), \ldots, \iota_{K(S)}^r(i_r)}$ for $i\in I_{K(S)}$ \\ \caption{Assembling the global tensor~$A$ from the local contributions on all cells, exterior and interior facets. 
For assembly over exterior facets, $K(S)$ refers to the cell $K\in\mathcal{T}$ incident to the exterior facet~$S$, and for assembly over interior facets, $K(S)$ refers to the ``macro cell'' consisting of the pair of cells $K^+$ and $K^-$ incident to the interior facet~$S$.} \label{alg:assembly} \end{algorithm} \normalsize ffc-1.6.0/ufc-merge-into-ffc/doc/manual/chapters/assembly_cpp.tex000066400000000000000000000063041255571034100246750ustar00rootroot00000000000000\chapter{A basic UFC-based assembler} \label{app:assembly} Below, we include a sketch of a UFC-based implementation of the assembly of the global tensor~$A$ by summing the local contributions from all cells. The contributions from all exterior and interior facets may be computed similarly. The implementation is incomplete and system specific details such as interaction with mesh and linear algebra libraries have been omitted.\footnote{For an example of a complete implementation of a UFC-based assembler, we refer to the source code of DOLFIN~\cite{www:dolfin}, in particular class \texttt{Assembler} as implemented in \texttt{Assembler.cpp}.} \scriptsize \begin{code} void assemble(..., ufc::form& form, ...) { ... // Initialize mesh data structure ufc::mesh mesh; mesh.num_entities = new unsigned int[...]; ... // Initialize cell data structure ufc::cell cell; cell.entity_indices = new unsigned int[...]; cell.coordinates = new double[...]; ... // Create cell integrals ufc::cell_integral** cell_integrals; cell_integrals = new ufc::cell_integral*[form.num_cell_domains()]; for (unsigned int i = 0; i < form.num_cell_domains(); i++) cell_integrals[i] = form.create_cell_integral(i); // Create dofmaps ufc::dofmaps** dofmaps; dofmaps = new ufc::dofmap*[form.rank() + form.num_coefficients()]; for (unsigned int i = 0; i < form.rank() + form.num_coefficients(); i++) { dofmaps[i] = form.create_dofmap(i); // Initialize dofmap if (dofmaps[i]->init_mesh(mesh)) { // Iterate over cells for (...) 
{ // Update cell data structure to current cell cell.entity_indices[...] = ... cell.coordinates[...] = ... ... // Initialize dofmap for cell dofmaps[i]->init_cell(mesh, cell); } dofmap.init_cell_finalize(); } } // Initialize array of values for the cell tensor unsigned int size = 1; for (unsigned int i = 0; i < form.rank(); i++) size *= dofmaps[i]->max_local_dimension(); double* AK = new double[size]; // Initialize array of local to global dofmaps unsigned int** dofs = new unsigned int*[form.rank()]; for (unsigned int i = 0; i < form.rank(); i++) dofs[i] = new unsigned int[dofmaps[i]->max_local_dimension()]; // Initialize array of coefficient values double** w = new double*[form.num_coefficients()]; for (unsigned int i = 0; i < form.num_coefficients(); i++) w[i] = new double[dofmaps[form.rank() + i]->max_local_dimension()]; // Iterate over cells for (...) { // Get number of subdomain for current cell const unsigned int sub_domain = ... // Update cell data structure to current cell cell.entity_indices[...] = ... cell.coordinates[...] = ... ... // Interpolate coefficients (library specific so omitted here) ... // Tabulate dofs for each dimension for (unsigned int i = 0; i < ufc.form.rank(); i++) dofmaps[i]->tabulate_dofs(dofs[i], mesh, cell); // Tabulate cell tensor cell_integrals[sub_domain]->tabulate_tensor(AK, w, cell); // Add entries to global tensor (library specific so omitted here) ... } // Delete data structures delete [] mesh.num_entities; ... } \end{code} \normalsize ffc-1.6.0/ufc-merge-into-ffc/doc/manual/chapters/examples.tex000066400000000000000000000055661255571034100240430ustar00rootroot00000000000000\chapter{Complete UFC code for Poisson's equation} \index{Poisson's equation} \index{example code} In this section, a simple example is given of UFC code generated by the form compilers FFC~\cite{www:ffc,logg:article:07,logg:article:09,logg:article:10,logg:article:11} and SyFi~\cite{www:syfi} for Poisson's equation. 
The code presented below is generated for the bilinear form of Poisson's equation for standard continuous piecewise linear Lagrange finite elements on a two-dimensional domain $\Omega$, \begin{equation} a(v, u) = \int_{\Omega} \nabla v \cdot \nabla u \dx. \end{equation} Although FFC and SyFi are two different form compilers, with very different approaches to code generation, both generate code that conforms to the UFC specification and may thus be used interchangeably within any UFC-based system. In the generated code, we have omitted the two functions \texttt{evaluate\_basis} and \texttt{evaluate\_basis\_derivatives}\footnotemark{} to save space. \footnotetext{For FFC, this may be done by using the compiler flags \texttt{-fno-evaluate\_basis} and \texttt{-fno-evaluate\_basis\_derivatives}.} \section{Code generated by FFC} \index{FFC} \scriptsize \VerbatimInput[frame=single,rulecolor=\color{blue}]{code/poisson_ffc.h} \normalsize \section{Code generated by SyFi} \index{SyFi} In the following we list the complete code for the finite element, the dofmap and the variational form for computing a stiffness matrix based on linear Lagrangian elements in 2D. The code below is generated for the assembler in PyCC and it therefore includes some PyCC files, since the option \texttt{SFC.options.include\_from = "pycc"} was used during the code generation. If PyCC is not present, the option \texttt{SFC.options.include\_from = "syfi"} can be used, and this will result in some additional files that define the numbering scheme. 
\subsection{Header file for linear Lagrange element in 2D} \scriptsize \VerbatimInput[frame=single,rulecolor=\color{blue}]{code/stiffness_syfi/fe_Lagrange_1_2D.h} \normalsize \subsection{Source file for linear Lagrange element in 2D} \scriptsize \VerbatimInput[frame=single,rulecolor=\color{blue}]{code/stiffness_syfi/fe_Lagrange_1_2D.cpp} \normalsize \subsection{Header file for the dofmap} \scriptsize \VerbatimInput[frame=single,rulecolor=\color{blue}]{code/stiffness_syfi/dof_map_Lagrange_1_2D.h} \normalsize \subsection{Source file for the dofmap} \scriptsize \VerbatimInput[frame=single,rulecolor=\color{blue}]{code/stiffness_syfi/dof_map_Lagrange_1_2D.cpp} \normalsize \subsection{Header file for the stiffness matrix form} \scriptsize \VerbatimInput[frame=single,rulecolor=\color{blue}]{code/stiffness_syfi/form__stiffness_form__Lagrange_1_2D.h} \normalsize \subsection{Source file for the stiffness matrix form} \scriptsize \VerbatimInput[frame=single,rulecolor=\color{blue}]{code/stiffness_syfi/form__stiffness_form__Lagrange_1_2D.cpp} \normalsize ffc-1.6.0/ufc-merge-into-ffc/doc/manual/chapters/installation.tex000066400000000000000000000023111255571034100247070ustar00rootroot00000000000000\chapter{Installation} \label{app:installation} \index{installation} The \ufc{} package consists of two parts, the main part being a single header file called \texttt{ufc.h}. In addition, a set of Python utilities to simplify the generation of \ufc{} code is provided. Questions, bug reports and patches concerning the installation should be directed to the \ufc{} mailing list at the address \begin{code} fenics@lists.launchpad.net \end{code} \section{Installing \ufc{}} To install UFC, simply run \begin{code} scons sudo scons install \end{code} This installs the header file ufc.h and a small set of Python utilities (templates) for generating UFC code. Files will be installed under the default prefix. 
The installation prefix may be optionally specified, for example \begin{code} scons install prefix=~/local \end{code} Alternatively, just copy the single header file \texttt{src/ufc/ufc.h} into a suitable include directory. If you do not want to build and install the python extenstion module of \ufc{}, needed by, e.g., PyDOLFIN, you can write \begin{code} sudo enablePyUFC=No sudo cons install \end{code} Help with available options and default arguments can be viewed by \begin{code} scons -h \end{code} ffc-1.6.0/ufc-merge-into-ffc/doc/manual/chapters/interface.tex000066400000000000000000001322161255571034100241560ustar00rootroot00000000000000\chapter{Interface specification} \label{sec:interface} \index{interface} \section{A short remark on design} \index{design} UFC is organized as a minimalistic set of abstract C++ classes representing low-level abstractions of the finite element method. The functions in these classes are mainly of two types: (i) functions returning dimensions, which are typically used to allocate an array, and (ii) functions that fill an array with values. It is considered the assembly routine's responsibility to allocate and deallocate arrays of proper size. Consider for example the function for evaluating the $i$th basis function in the class \texttt{ufc::finite\_element}: \begin{code} virtual void evaluate_basis(unsigned int i, double* values, const double* coordinates, const cell& c) const = 0; \end{code} This function assumes that the array \texttt{values} has the correct size, which may be obtained by calling the functions \texttt{value\_rank} and \texttt{value\_dimension} as described in detail below. Thus, the UFC interface is a low-level interface that should be simple to integrate into an existing C++ finite element code, but which is probably not suitable to be exposed as part of an end-user interface. 
The UFC interface is defined by a single header file \texttt{ufc.h} which defines the central interface class \texttt{ufc::form} and a small set of auxiliary interface classes. In addition, a pair of data structures \texttt{ufc::mesh} and \texttt{ufc::cell} are defined and used for passing data to the interface functions. All functions defined by the UFC interface are \emph{pure virtual}, meaning that all these functions must be overloaded in each implementation of the classes. All but two functions (\texttt{init\_mesh} and \texttt{init\_cell}) are \texttt{const}, meaning that calling these \texttt{const} functions will leave the UFC objects unchanged. Input argument (pointers) are always \texttt{const}, while output arguments (pointers) are always non-\texttt{const}. \index{ufc.h} The interface is presented below in the same order as it is defined in the header file \texttt{ufc.h}. Thus, the interface is presented bottom-up, starting with the definition of basic data structures and ending with the definition of the main \texttt{ufc::form} interface class. \section{Cell shapes} \index{Cell shapes} \index{\texttt{interval}} \index{\texttt{triangle}} \index{\texttt{quadrilateral}} \index{\texttt{tetrahedron}} \index{\texttt{hexahedron}} \begin{code} enum shape {interval, triangle, quadrilateral, tetrahedron, hexahedron}; \end{code} This enumeration includes all cell shapes that are covered by the UFC specification, see Chapter~\ref{sec:referencecells}. \section{The class \texttt{ufc::mesh}} \index{\texttt{ufc::mesh}} The class \texttt{ufc::mesh} defines a data structure containing basic information about an unstructured mesh. It is used for passing a minimal amount of information about the global mesh to UFC functions. 
\subsection{The integer \texttt{topological\_dimension}} \begin{code} unsigned int topological_dimension; \end{code} The unsigned integer \texttt{topological\_dimension} holds the topological dimension of the mesh, that is, the topological dimension of the cells of the mesh. For the supported cell shapes defined above, the topological dimensions are as follows: \texttt{interval} has dimension one, \texttt{triangle} and \texttt{quadri\-lateral} have dimension two, and \texttt{tetrahedron} and \texttt{hexahedron} have dimension three. \subsection{The integer \texttt{geometric\_dimension}} \begin{code} unsigned int geometric_dimension; \end{code} The unsigned integer \texttt{geometric\_dimension} holds the geometric dimension of the mesh, that is, the dimension of the coordinates of the mesh vertices. Often, the geometric dimension is equal to the topological dimension, but they may differ. For example, one may have a topologically two-dimensional mesh embedded in three-dimensional space. \subsection{The array \texttt{num\_entities}} \begin{code} unsigned int* num_entities; \end{code} The array \texttt{num\_entities} should contain the number of entities within each topological dimension of the mesh (see Chapter~\ref{sec:referencecells}). The size of the array should be equal to the topological dimension of the mesh plus one. Thus, for a mesh of tetrahedral cells, \texttt{num\_entities[0]} should contain the number of vertices, \texttt{num\_entities[1]} should contain the number of edges (if they are needed, see \texttt{ufc::dofmap::needs\_mesh\_entities} below), \texttt{num\_en\-tities[2]} should contain the number of faces, and \texttt{num\_entities[3]} should contain the number of volumes. If \texttt{d} is the topological dimension of the mesh, \texttt{num\_entities[d]} should contain the number of cells or elements. \section{The class \texttt{ufc::cell}} \index{\texttt{ufc::cell}} The class \texttt{ufc::cell} defines the data structure for a cell in a mesh. 
Its intended use is not as a building block in a mesh data structure, but merely as a view of specific data for a single cell. It is used to pass cell data to UFC functions with a minimal amount of assumptions on how the computational mesh is represented and stored. \subsection{The enum variable \texttt{cell\_shape}} \begin{code} shape cell_shape; \end{code} The variable \texttt{cell\_shape} should be set to the corresponding \texttt{ufc::shape} for the cell. \subsection{The integer \texttt{topological\_dimension}} \begin{code} unsigned int topological_dimension; \end{code} The integer \texttt{topological\_dimension} should be set to the topological dimension of the cell (see \texttt{ufc::mesh} above). \subsection{The integer \texttt{geometric\_dimension}} \begin{code} unsigned int geometric_dimension; \end{code} The integer \texttt{geometric\_dimension} should be set to the geometric dimension of the cell (see \texttt{ufc::mesh} above). \subsection{The array \texttt{entity\_indices}} \begin{code} unsigned int** entity_indices; \end{code} The array \texttt{entity\_indices} should contain the global indices for all entities of the cell (see Chapter~\ref{sec:referencecells}). The length of the array \texttt{entity\_indices} should be equal to the value of the variable \texttt{topological\_dimension} plus one. Thus, \texttt{entity\_indices[0]} should be an array containing the global indices of all the vertices of the cell, \texttt{entity\_indices[1]} should be an array containing the global indices of all the edges of the cell, etc. The sizes of these arrays are implicitly defined by the cell type. Note that the entity indices are not always needed for all entities of the cell. Which entities are required is specified by the \texttt{ufc::dofmap} class (see \texttt{ufc::dofmap::needs\_mesh\_entities} below). 
\subsection{The array \texttt{coordinates}} \begin{code} double** coordinates; \end{code} The array \texttt{coordinates} should contain the global coordinates for all vertices of the cell and thus its length should be equal to number of vertices of the cell. The length of the array \texttt{coordinates[0]} should be equal to the value of the variable \texttt{geometric\_dimension} and it should contain the $x$, $y$, \ldots coordinates of the first vertex etc. \subsection{The integer \texttt{index}} Introduced in UFC version 2.0. \begin{code} unsigned int index; \end{code} The integer \texttt{index} should contain the global index for the cell. This is a short-cut for \texttt{entity\_indices[topological\_dimension][0]}. \subsection{The integer \texttt{local\_facet}} Introduced in UFC version 2.0. \begin{code} int local_facet; \end{code} The integer \texttt{local\_facet} can be used during callbacks to the \texttt{evaluate} function of \texttt{ufc::function} to specify the local index of a facet (relative to the cell) on which the function is being evaluated. In all other cases, this variable should be set to \texttt{-1}. \subsection{The integer \texttt{mesh\_identifier}} Introduced in UFC version 2.0. \begin{code} int mesh_identifier; \end{code} The integer \texttt{mesh\_identifier} can be used during callbacks to the \texttt{evaluate} function of \texttt{ufc::function} to specify a global identifier for the mesh on which the function is being evaluated. This allows \texttt{ufc::function} subclasses to handle evaluation differently on different meshes. If not used, this variable should be set to \texttt{-1}. \section{The class \texttt{ufc::function}} \index{\texttt{ufc::function}} The class \texttt{ufc::function} is an interface for evaluation of general tensor-valued functions on the cells of a mesh. 
\subsection{The function \texttt{evaluate}} \begin{code} virtual void evaluate(double* values, const double* coordinates, const cell& c) const = 0; \end{code} The only function in this class is \texttt{evaluate}, which evaluates all the value components of the function at a given point in a given cell of the mesh. The output of \texttt{evaluate} should be written to the array \texttt{values}. For a scalar-valued function, a single value should be written to \texttt{values[0]}. For general tensor-valued functions, the values should be written in a flattened row-major ordering of the tensor values. Thus, for a function $f : K \rightarrow \R^{2x2}$ (where $A = f(x)$ is a $2 \times 2$ matrix), the array \texttt{values} should contain the values $A_{11}, A_{12}, A_{21}, A_{22}$ in this order. The input to \texttt{evaluate} are the coordinates of a point in a cell and the UFC view of the cell containing that point. See also the description of \texttt{ufc::finite\_element::evaluate\_dof} below. \section{The class \texttt{ufc::finite\_element}} \index{\texttt{ufc::finite\_element}} The class \texttt{ufc::finite\_element} represents a finite element in the classical Ciarlet sense~\cite{Cia78}, or rather a particular instance of a finite element for a particular choice of nodal basis functions. Thus, a \texttt{ufc::finite\_element} has functions for accessing the shape of the finite element, the dimension of the polynomial function space, the basis functions of the function space (and their derivatives), and the linear functionals defining the degrees of freedom. In addition, a \texttt{ufc::finite\_element} provides functionality for interpolation. \subsection{The function \texttt{signature}} \begin{code} virtual const char* signature() const = 0; \end{code} This function returns a signature string that uniquely identifies the finite element. This can be used to compare whether or not two given \texttt{ufc::fi\-nite\_element} objects are identical. 
\subsection{The function \texttt{cell\_shape}} \begin{code} virtual shape cell_shape() const = 0; \end{code} This function returns the shape of the cell the finite element is defined on. \subsection{The function \texttt{topological\_dimension}} Introduced in UFC version 2.0. \begin{code} virtual unsigned int topological_dimension() const = 0; \end{code} This function returns the topological dimension of the cell the finite element is defined on. \subsection{The function \texttt{geometric\_dimension}} Introduced in UFC version 2.0. \begin{code} virtual unsigned int geometric_dimension() const = 0; \end{code} This function returns the geometric dimension of the cell the finite element is defined on. \subsection{The function \texttt{space\_dimension}} \begin{code} virtual unsigned int space_dimension() const = 0; \end{code} This function returns the dimension of the local finite element space ($|V_h^K|$), which is equal to the number of basis functions. This should also be equal to the value of \texttt{local\_dimension()} for the corresponding \texttt{ufc::dofmap} (see below). \subsection{The function \texttt{value\_rank}} \begin{code} virtual unsigned int value_rank() const = 0; \end{code} A finite element can have general tensor-valued basis functions. The function \texttt{value\_rank} returns the rank of the value space of the basis functions. For a scalar element, this function should return zero, for vector-valued functions it should return one, for matrix-valued functions it should return two, etc. For mixed elements, this may not always make sense, for example with a tensor-vector-scalar element. Thus the value rank of a mixed element must be 1 if any of the subelements have different value ranks. 
\subsection{The function \texttt{value\_dimension}} \begin{code} virtual unsigned int value_dimension(unsigned int i) const = 0; \end{code} This function returns the dimension of the value space of the finite element basis functions for the given axis, where the given axis must be a number between zero and the value rank minus one. Note that the total size (number of values) of the value space is obtained as the product of \texttt{value\_dimension(i)} for $0 \le i <$ \texttt{value\_rank()}. For a mixed element with value rank 1 Since the value rank of a mixed element must be 1 if any of the subelements have different value ranks, \texttt{value\_dimension(0)} is then the total value size of all the subelements. \subsection{The function \texttt{evaluate\_basis}} \begin{code} virtual void evaluate_basis(unsigned int i, double* values, const double* coordinates, const cell& c) const = 0; \end{code} This function evaluates basis function \texttt{i} at the given \texttt{coordinates} within the given cell \texttt{c}, and stores the values in the array \texttt{values}. The size of the output array should be equal to size of the value space (see \texttt{value\_dimension} above). The point defined by \texttt{coordinates} should be located inside the cell~\texttt{c}. If the point is located outside the cell, then the result is undefined.~\footnote{In particular, the basis functions generated by FIAT/FFC are undefined along the line $y = 1$ on the UFC reference element since the collapsed cube mapping used by FIAT is singular along that line.} \subsection{The function \texttt{evaluate\_basis\_all}} Introduced in UFC version 1.1. \begin{code} virtual void evaluate_basis_all(double* values, const double* coordinates, const cell& c) const = 0; \end{code} As \texttt{evaluate\_basis}, but evaluates all basis functions at once, which can be implemented much more effectively than multiple calls to \texttt{evaluate\_basis}. 
The size of the output array should be equal to size of the value space times the number of basis functions. The computed values for a single basis function are placed contiguously in the array. Note that just as for \texttt{evaluate\_basis}, the point defined by \texttt{coordinates} should be located inside the cell~\texttt{c}. The result is otherwise undefined. \subsection{The function \texttt{evaluate\_basis\_derivatives}} \begin{code} virtual void evaluate_basis_derivatives(unsigned int i, unsigned int n, double* values, const double* coordinates, const cell& c) const = 0; \end{code} This function evaluates all order \texttt{n} derivatives of basis function \texttt{i} at the given \texttt{coordinates} within the given \texttt{cell}, and stores the values in the array \texttt{values}. Derivatives may be obtained up to the polynomial degree of the finite element function space with higher degree derivatives evaluating to zero. The number of derivatives is given by $d^n$ where $d$ is the geometric dimension of the cell. For $n = 1$, $d = 3$, the order of the derivatives is naturally $\partial/\partial x$, $\partial/\partial y$, $\partial/\partial z$. For $n = 2$, $d = 3$, the order of the derivatives is $\frac{\partial^2}{\partial x\partial x}, \frac{\partial^2}{\partial x\partial y}, \ldots, \frac{\partial^2}{\partial z\partial z}$. Thus, the derivatives are stored in a flattened row-major ordering based on the derivative spatial dimensions. For tensor-valued basis functions, the ordering of derivatives is row-major based on the value space dimensions followed by the derivative spatial dimensions. Note that just as for \texttt{evaluate\_basis}, the point defined by \texttt{coordinates} should be located inside the cell~\texttt{c}. The result is otherwise undefined. \subsection{The function \texttt{evaluate\_basis\_derivatives\_all}} Introduced in UFC version 1.1. 
\begin{code} virtual void evaluate_basis_derivatives_all(unsigned int n, double* values, const double* coordinates, const cell& c) const = 0; \end{code} As \texttt{evaluate\_basis\_derivatives}, but evaluated for all basis functions at once, which can be implemented much more effectively than multiple calls to \texttt{evaluate\_basis\_derivatives}. The size of the output array should be equal to the corresponding size defined for \texttt{evaluate\_basis\_derivatives} times the number of basis functions. The computed values for a single basis function are placed contiguously in the array. Note that just as for \texttt{evaluate\_basis}, the point defined by \texttt{coordinates} should be located inside the cell~\texttt{c}. The result is otherwise undefined. \subsection{The function \texttt{evaluate\_dof}} \begin{code} virtual double evaluate_dof(unsigned int i, const function& f, const cell& c) const = 0; \end{code} This function evaluates and returns the value of the degree of freedom \texttt{i} (which is a linear functional) on the given function \texttt{f}. For example, the degrees of freedom for Lagrange finite elements are given by evaluation of \texttt{f} at a set of points. Other examples of degrees of freedom include weighted integrals over facets or normal components on facets. \subsection{The function \texttt{evaluate\_dofs}} Introduced in UFC version 1.1. \begin{code} virtual void evaluate_dofs(double* values, const function& f, const cell& c) const = 0; \end{code} Vectorized version of \texttt{evaluate\_dof} for efficiency, returning the values of all degrees of freedom in the array \texttt{values}. 
\subsection{The function \texttt{interpolate\_vertex\_values}} \begin{code} virtual void interpolate_vertex_values(double* vertex_values, const double* dof_values, const cell& c) const = 0; \end{code} This function takes as input the array \texttt{dof\_values} containing the expansion coefficients for some function in the nodal basis and computes the values of that function at the vertices of the given cell, storing those values in the array \texttt{vertex\_values}. If the function is tensor-valued, the values are stored in the array \texttt{vertex\_values} row-major on the list of vertices followed by the row-major ordering of the tensor values as described above. \subsection{The function \texttt{map\_from\_reference\_cell}} Introduced in UFC version 2.0. \begin{code} virtual void map_from_reference_cell(double* x, const double* xhat, const cell& c) = 0; \end{code} This function maps a given coordinate \texttt{xhat} on the reference cell to a coordinate \texttt{x} on a given \texttt{ufc::cell}. \subsection{The function \texttt{map\_to\_reference\_cell}} Introduced in UFC version 2.0. \begin{code} virtual void map_to_reference_cell(double* xhat, const double* x, const cell& c) = 0; \end{code} This function maps a given coordinate \texttt{x} on a given \texttt{ufc::cell} to a coordinate \texttt{xhat} on the reference cell. \subsection{The function \texttt{num\_sub\_elements}} \begin{code} virtual unsigned int num_sub_elements() const = 0; \end{code} This function returns the number of subelements for a nested (mixed) element. For simple elements (non-nested), this function should return one. A nested element is an element that is defined from a set of elements by taking the direct sum (tensor product) of the polynomial spaces of those elements. 
For example, the basis functions $\psi_1, \psi_2, \ldots, \psi_m$ of a vector-valued Lagrange element may be constructed from a scalar Lagrange element by repeating the basis functions $\phi_1, \phi_2, \ldots, \phi_n$ of the scalar element and padding with zeros: $\psi_1 = (\phi_1, 0), \psi_2 = (\phi_2, 0), \ldots, \psi_n = (\phi_n, 0), \psi_{n+1} = (0, \phi_1), \psi_{n+2} = (0, \phi_2), \ldots$. Finite elements may be nested at arbitrary depth. For example, a mixed Taylor--Hood element may be created by combining a vector-valued quadratic Lagrange finite element with a scalar linear Lagrange finite element, and the vector-valued element may in turn be created by combining a set of scalar quadratic Lagrange elements. \subsection{The function \texttt{create\_sub\_element}} \begin{code} virtual finite_element* create_sub_element(unsigned int i) const = 0; \end{code} This factory function constructs a \texttt{ufc::finite\_element} object for subelement \texttt{i}. The argument \texttt{i} must be an integer between zero and the number of subelements (\texttt{num\_sub\_elements}) minus one. If the element is simple (non-nested), this function returns a zero pointer. The caller is responsible for deleting the returned object. Note that in earlier versions of UFC, this function returned a copy of the element itself in the case of a simple element. To create a copy of the element, use the \texttt{create} function instead. \subsection{The function \texttt{create}} Introduced in UFC version 2.0. \begin{code} virtual finite_element* create() const = 0; \end{code} This factory function creates a new instance of the generated \texttt{ufc::finite\_\-element} subclass. \section{The class \texttt{ufc::dofmap}} \index{\texttt{ufc::dofmap}} This class represents the local-to-global mapping of degrees of freedom (dofs), or rather one particular instance of such a mapping (there are many possible local-to-global mappings) as defined in Equation \eqref{eq:iota_K}. 
The most central function of this class is \texttt{tabulate\_dofs}, which tabulates the local-to-global mapping from the degree of freedom indices on a local cell to a global vector of degree of freedom indices. \subsection{The function \texttt{signature}} \begin{code} virtual const char* signature() const = 0; \end{code} This function returns a signature string that uniquely identifies the dofmap. This can be used to compare whether or not two given \texttt{ufc::dofmap} objects are identical. (This may be used to optimize the assembly of forms by caching previously computed dofmaps.) \subsection{The function \texttt{needs\_mesh\_entities}} \begin{code} virtual bool needs_mesh_entities(unsigned int d) const = 0; \end{code} This function returns true if the \texttt{ufc::dofmap} requires mesh entities of topological dimension \texttt{d} to be available in \texttt{ufc::cell} arguments. Thus, if this function returns false for a given value of \texttt{d}, then the array \texttt{entity\_indices[d]} of the \texttt{ufc::cell} data structure will not be used during calls to \texttt{ufc::dofmap} functions. In those cases, the array \texttt{entity\_indices[d]} may be set to zero. This may be used to check which entities must be generated to tabulate the local-to-global mapping. For example, linear Lagrange elements will only need to know the vertices of each cell in the mesh, while quadratic Lagrange elements will also need to know the edges of each cell in the mesh. \subsection{The function \texttt{init\_mesh}} \begin{code} virtual bool init_mesh(const mesh& mesh) = 0; \end{code} This function initializes the dofmap for a given mesh. If it returns true, calls to \texttt{init\_cell} and \texttt{init\_cell\_finalize} are required to complete the initialization. The function \texttt{global\_dimension} be may only be called when the initialization is complete. 
\subsection{The function \texttt{init\_cell}} \begin{code} virtual void init_cell(const mesh& m, const cell& c) = 0; \end{code} For \texttt{ufc::dofmap} objects where \texttt{init\_mesh} returns true, this function must be called for each cell in the mesh to initialize the dofmap. \subsection{The function \texttt{init\_cell\_finalize}} \begin{code} virtual void init_cell_finalize() = 0; \end{code} For \texttt{ufc::dofmap} objects where \texttt{init\_mesh} returns true, this function must be called after \texttt{init\_cell} is called for each cell in the mesh to complete initialization of the dofmap. \subsection{The function \texttt{topological\_dimension}} Introduced in UFC version 1.1. \begin{code} virtual unsigned int topological_dimension() const = 0; \end{code} This function returns the topological dimension of the associated cell shape. \subsection{The function \texttt{geometric\_dimension}} Introduced in UFC version 2.0. \begin{code} virtual unsigned int geometric_dimension() const = 0; \end{code} This function returns the geometric dimension of the associated cell shape. \subsection{The function \texttt{global\_dimension}} \begin{code} virtual unsigned int global_dimension() const = 0; \end{code} This function returns the dimension of the global finite element space on the mesh that the \texttt{ufc::dofmap} has been initialized for. The result of calling this function before the initialization is complete is undefined. \subsection{The function \texttt{local\_dimension}} Changed in version 1.2. \begin{code} virtual unsigned int local_dimension(const cell& c) const = 0; \end{code} This function returns the dimension of the local finite element space on a given cell. \subsection{The function \texttt{max\_local\_dimension}} Introduced in UFC version 1.2. \begin{code} virtual unsigned int max_local_dimension() const = 0; \end{code} This function returns the maximum dimension of the local finite element space on a single cell. 
\subsection{The function \texttt{num\_facet\_dofs}} \begin{code} virtual unsigned int num_facet_dofs() const = 0; \end{code} This function returns the number of dofs associated with a single facet of a cell, including all dofs associated with mesh entities of lower dimension incident with the facet. For example on a tetrahedron this will include dofs associated with edges and vertices of the triangle face. This is also the number of dofs that should be set if a Dirichlet boundary condition is applied to a single facet. \subsection{The function \texttt{num\_entity\_dofs}} Introduced in UFC version 1.1. \begin{code} virtual unsigned int num_entity_dofs(unsigned int d) const = 0; \end{code} This function returns the number of dofs associated with a single mesh entity of dimension \texttt{d} in a cell, not including dofs associated with incident entities of lower dimension (unlike \texttt{num\_facet\_dofs()}). It is assumed that all cells of the mesh have the same number of degrees of freedom on each mesh entity of the same dimension. \subsection{The function \texttt{tabulate\_dofs}} \begin{code} virtual void tabulate_dofs(unsigned int* dofs, const mesh& m, const cell& c) const = 0; \end{code} This function tabulates the global dof indices corresponding to each dof on the given cell. The size of the output array \texttt{dofs} should be equal to the value returned by \texttt{local\_dimension()}. \subsection{The function \texttt{tabulate\_facet\_dofs}} \begin{code} virtual void tabulate_facet_dofs(unsigned int* dofs, unsigned int facet) const = 0; \end{code} This function tabulates the local dof indices associated with a given local facet, including all dofs associated with mesh entities of lower dimension incident with the facet. The size of the output array \texttt{dofs} should equal the value returned by \texttt{num\_facet\_dofs}. \subsection{The function \texttt{tabulate\_entity\_dofs}} Introduced in UFC version 1.1. 
\begin{code} virtual void tabulate_entity_dofs(unsigned int* dofs, unsigned int d, unsigned int i) const = 0; \end{code} This function tabulates the local dof indices associated with a given local mesh entity \texttt{i} of dimension \texttt{d}, i.e. mesh entity (d, i), not including dofs associated with incident entities of lower dimension (unlike \texttt{tabulate\_facet\_dofs}). The size of the output array \texttt{dofs} should equal the value returned by the function \texttt{num\_entity\_dofs(d)}. As an example, calling \texttt{tabulate\_entity\_dofs} for a face (d = 2) should yield only the dofs associated with the face that are not associated with vertices and edges. Thus \texttt{tabulate\_entity\_dofs} can be used to build connectivity information. \subsection{The function \texttt{tabulate\_coordinates}} \begin{code} virtual void tabulate_coordinates(double** coordinates, const cell& c) const = 0; \end{code} This function tabulates the coordinates for each dof on the given cell. For Lagrange elements, this function will tabulate a set of points on the given cell such that the dofs of the finite element are given by evaluation at those points. For elements that do not have a direct relationship between coordinates and dofs, an attempt should be made at a sensible implementation of this function. For example, if a dof is defined as the integral over a facet, the midpoint of the facet can be used. If no other choice makes sense, the midpoint of the cell can be used as a last resort. This function must thus be used with care if non-Lagrangian elements are used. The size of the output array \texttt{coordinates} should be equal to the value returned by \texttt{local\_dimension()} and the size of each subarray \texttt{coordi\-nates[0]}, \texttt{coordinates[1]} etc should be equal to the geometric dimension of the mesh, which can be obtained with the function \texttt{dofmap::geometric\_\-dimension()}. 
\subsection{The function \texttt{num\_sub\_dofmaps}} \begin{code} virtual unsigned int num_sub_dofmaps() const = 0; \end{code} This function returns the number of sub-dofmaps for a nested (mixed) element. For a discussion on the subelement concept, see the documentation of the function \texttt{ufc::finite\_element::num\_sub\_elements}. For simple elements (non-nested), this function should return one. \subsection{The function \texttt{create\_sub\_dofmap}} \begin{code} virtual dofmap* create_sub_dofmap(unsigned int i) const = 0; \end{code} This factory function constructs a \texttt{ufc::dofmap} object for subelement \texttt{i}. The argument \texttt{i} must be a number between zero and the number of sub-dofmaps (\texttt{num\_sub\_dofmaps}) minus one. If the dofmap is simple (non-nested), this function returns a zero pointer. The caller is responsible for deleting the returned object. Note that in earlier versions of UFC, this function returned a copy of the dofmap itself in the case of a simple element. To create a copy of the element, use the \texttt{create} function instead. \subsection{The function \texttt{create}} Introduced in UFC version 2.0. \begin{code} virtual dofmap* create() const = 0; \end{code} This factory function creates a new instance of the generated \texttt{ufc::dofmap} subclass. \section{The integral classes} As described in Section~\ref{sec:assembly}, and in particular Equation \eqref{eq:form_integrals}, the global sparse tensor (the ``stiffness matrix'') representing a given form (differential operator) may be assembled by summing the contributions from the local cells, exterior facets and interior facets of the mesh. These contributions are represented in the UFC interface by the classes \texttt{cell\_integral}, \texttt{exterior\_facet\_integral} and \texttt{interior\_facet\_integral}. 
Each of these three integral classes has a single function \texttt{tabulate\_tensor} which may be called to compute the corresponding local contribution (cell tensor, exterior facet tensor or interior facet tensor). \section{The class \texttt{ufc::cell\_integral}} \index{\texttt{ufc::cell\_integral}} The class \texttt{ufc::cell\_integral} represents the integral of a form over a local cell in a finite element mesh. It has a single function \texttt{tabulate\_tensor} which may be called to tabulate the values of the cell tensor for a given cell. \subsection{The function \texttt{tabulate\_tensor}} \index{\texttt{tabulate\_tensor}} \begin{code} virtual void tabulate_tensor(double* A, const double * const * w, const cell& c) const = 0; \end{code} This function tabulates the values of the cell tensor for a form into the given array \texttt{A}. The size of this array should be equal to the product of the local space dimensions for the set of finite element function spaces corresponding to the arguments of the form. For example, when computing the matrix for a bilinear form defined on piecewise linear scalar elements on triangles, the space dimension of the local finite element is three and so the size of the array \texttt{A} should be $3 \times 3 = 9$. The array \texttt{w} should contain the expansion coefficients for all \emph{coefficients} of the form in the finite element nodal basis for each corresponding function space. Thus, the size of the array \texttt{w} should be equal to the number of coefficients~$n$, and the size of each each array \texttt{w[0]}, \texttt{w[1]} etc should be equal to the space dimension of the corresponding local finite element space. 
\subsection{The function \texttt{tabulate\_tensor} (quadrature version)} \index{\texttt{tabulate\_tensor} (quadrature version)} Introduced in UFC version version 2.0 \begin{code} virtual void tabulate_tensor(double* A, const double * const * w, const cell& c, unsigned int num_quadrature_points, const double * const * quadrature_points, const double* quadrature_weights) const = 0; \end{code} This function is identical to \texttt{tabulate\_tensor} described above but computes the integral over the cell using the given set of quadrature points and weights. Note that the quadrature points should be specified on the reference cell. By a careful choice of quadrature points, this function can be used to integrate over subsets of cells. \section{The class \texttt{ufc::exterior\_facet\_integral}} \index{\texttt{ufc::exterior\_facet\_integral}} The class \texttt{ufc::exterior\_facet\_integral} represents the integral of a form over a local exterior facet (boundary facet) in a finite element mesh. It has a single function \texttt{tabulate\_tensor} which may be called to tabulate the values of the exterior facet tensor for a given facet. \subsection{The function \texttt{tabulate\_tensor}} \index{\texttt{tabulate\_tensor}} \begin{code} virtual void tabulate_tensor(double* A, const double * const * w, const cell& c, unsigned int facet) const = 0; \end{code} The arrays \texttt{A} and \texttt{w} have the same function and should have the same sizes as described in the documentation for \texttt{cell\_integral::tabulate\_tensor}. Thus, the values of the exterior facet integral will be tabulated into the array \texttt{A} and the nodal basis expansions of all coefficients should be provided in the array \texttt{w}. The additional argument \texttt{facet} should specify the local number of the facet with respect to its (single) incident cell. 
Thus, when the facet is an edge of a triangle, the argument \texttt{facet} should be an integer between zero and two (0, 1, 2) and when the facet is a face of a tetrahedron, the argument \texttt{facet} should be an integer between zero and three (0, 1, 2, 3). \subsection{The function \texttt{tabulate\_tensor} (quadrature version)} \index{\texttt{tabulate\_tensor} (quadrature version)} Introduced in UFC version 2.0.
However, the interior facet tensor contains contributions from the two incident cells of an interior facet and thus the dimensions of these arrays are different. On each interior facet, the two incident (neighboring) cells form a ``macro cell'' consisting of the total set of local basis functions on the two cells. The set of basis functions on the macro element is obtained by extending the basis functions on each of the two cells by zero to the macro cell. Thus, the space dimension of the finite element function space on the macro element is twice the size of the finite element function space on a single cell. The ordering of basis functions and degrees of freedom on the macro cell is obtained by first enumerating the basis functions and degrees of freedom on one of the two cells and then the basis functions and degrees of freedom on the second cell. Thus the size of the array \texttt{A} should be equal to the product of twice the local space dimensions for the set of finite element function spaces corresponding to the arguments of the form. For example, when computing the matrix for a bilinear form defined on piecewise linear elements on triangles, the space dimension of the local finite element is three and so the size of the array \texttt{A} should be $6 \times 6 = 36$. Similarly, the array \texttt{w} should contain the expansion coefficients for all \emph{coefficients} of the form in the finite element nodal basis for each corresponding function space on the macro cell. Thus, the size of the array \texttt{w} should be equal to the number of coefficients~$n$ and the size of each each array \texttt{w[0]}, \texttt{w[1]} etc should be equal to twice the space dimension of the corresponding local finite element space. The additional arguments \texttt{facet0} and \texttt{facet1} should specify the local number of the facet with respect to its two incident cells. 
Thus, when the facet is an edge of a triangle, each of these arguments may be an integer between zero and two (0, 1, 2) and when the facet is a face of a tetrahedron, each of these arguments may be an integer between zero and three (0, 1, 2, 3). \subsection{The function \texttt{tabulate\_tensor} (quadrature version)} \index{\texttt{tabulate\_tensor} (quadrature version)} Introduced in version version 2.0 \begin{code} virtual void tabulate_tensor(double* A, const double * const * w, const cell& c, unsigned int num_quadrature_points, const double * const * quadrature_points, const double* quadrature_weights) const = 0; \end{code} This function is identical to \texttt{tabulate\_tensor} described above but computes the integral over the cell using the given set of quadrature points and weights. Note that the quadrature points should be specified on the reference cell. By a careful choice of quadrature points, this function can be used to integrate over subsets of facets. \section{The class \texttt{ufc::form}} \index{\texttt{ufc::form}} The \texttt{ufc::form} class is the central part of the UFC interface and it represents a form \begin{equation} a = a(v_1, \ldots, v_r; w_1, \ldots, w_n), \end{equation} defined on the product space $V_h^1 \times V_h^2 \times \cdots \times V_h^r \times W_h^1 \times W_h^2 \times \cdots \times W_h^n$ of two sets $\{V_h^j\}_{j=1}^r, \{W_h^j\}_{j=1}^n$ of finite element function spaces on a triangulation $\mathcal{T}$ of a domain $\Omega \subset \R^d$. A \texttt{ufc::form} provides functions for accessing the rank~$r$ and number of coefficients~$n$ for a form, and factory functions for creating UFC objects for the corresponding cell integrals, exterior facet integrals, interior facet integrals, and all associated finite elements and dofmaps (local-to-global mappings). 
\subsection{The function \texttt{signature}} \begin{code} virtual const char* signature() const = 0; \end{code} This function returns a signature string that uniquely identifies the form. This can be used to compare whether or not two given \texttt{ufc::form} objects are identical. \subsection{The function \texttt{rank}} \begin{code} virtual unsigned int rank() const = 0; \end{code} This function returns the rank~$r$ of the global tensor generated by the form (the arity of the form). \subsection{The function \texttt{num\_coefficients}} \begin{code} virtual unsigned int num_coefficients() const = 0; \end{code} This function returns the number of coefficients~$n$ for the form. Note that all integral terms of a form must have the same coefficients, even if not all coefficients are present in each term of the form. \subsection{The function \texttt{num\_cell\_domains}} \begin{code} virtual unsigned int num_cell_domains() const = 0; \end{code} This function returns the number of different cell domains for the form. A form may have an arbitrary number of integrals over disjoint subdomains of the mesh. \subsection{The function \texttt{num\_exterior\_facet\_domains}} \begin{code} virtual unsigned int num_exterior_facet_domains() const = 0; \end{code} This function returns the number of different exterior facet domains for the form. A form may have an arbitrary number of integrals over disjoint subdomains of the mesh boundary. \subsection{The function \texttt{num\_interior\_facet\_domains}} \begin{code} virtual unsigned int num_interior_facet_domains() const = 0; \end{code} This function returns the number of different interior facet domains for the form. A form may have an arbitrary number of integrals over disjoint subsets of the interior facets of the mesh. 
\subsection{The function \texttt{create\_finite\_element}} \begin{code} virtual finite_element* create_finite_element(unsigned int i) const = 0; \end{code} This factory function constructs a \texttt{ufc::finite\_element} object for form argument \texttt{i}. A form with rank~$r$ and number of coefficients~$n$ has $r + n$ arguments, so this function returns the finite element object for tensor axis $i$ if $i < r$, or the finite element for coefficient $i - r$ if $i \geq r$. The caller is responsible for deleting the returned object. \subsection{The function \texttt{create\_dofmap}} \begin{code} virtual dofmap* create_dofmap(unsigned int i) const = 0; \end{code} This factory function constructs a \texttt{ufc::dofmap} object for form argument \texttt{i}. A form with rank~$r$ and number of coefficients~$n$ has $r + n$ arguments, so this function returns the dofmap object for tensor axis $i$ if $i < r$, or the dofmap for coefficient $i - r$ if $i \geq r$. The caller is responsible for deleting the returned object. \subsection{The function \texttt{create\_cell\_integral}} \begin{code} virtual cell_integral* create_cell_integral(unsigned int i) const = 0; \end{code} This factory function constructs a \texttt{cell\_integral} object for cell domain \texttt{i}. The caller is responsible for deleting the returned object. If the integral evaluates to zero, this function may return a null pointer. \subsection{The function \texttt{create\_exterior\_facet\_integral}} \begin{code} virtual exterior_facet_integral* create_exterior_facet_integral(unsigned int i) const = 0; \end{code} This factory function constructs an \texttt{exterior\_facet\_integral} object for exterior facet domain \texttt{i}. The caller is responsible for deleting the returned object. If the integral evaluates to zero, this function may return a null pointer. 
\subsection{The function \texttt{create\_interior\_facet\_integral}} \begin{code} virtual interior_facet_integral* create_interior_facet_integral(unsigned int i) const = 0; \end{code} This factory function constructs an \texttt{interior\_facet\_integral} object for interior facet domain \texttt{i}. The caller is responsible for deleting the returned object. If the integral evaluates to zero, this function may return a null pointer. ffc-1.6.0/ufc-merge-into-ffc/doc/manual/chapters/interface_cpp.tex000066400000000000000000000003551255571034100250160ustar00rootroot00000000000000\chapter{C++ Interface} Below follows a verbatim copy of the complete UFC interface which is specified in the header file~\texttt{ufc.h}. \scriptsize \VerbatimInput[frame=single,rulecolor=\color{blue}]{../../src/ufc/ufc.h} \normalsize ffc-1.6.0/ufc-merge-into-ffc/doc/manual/chapters/introduction.tex000066400000000000000000000101311255571034100247260ustar00rootroot00000000000000\chapter{Introduction} \label{sec:introduction} Large parts of a finite element program are similar from problem to problem and can therefore be coded as a general, reusable library. Mesh data structures, linear algebra and finite element assembly are examples of operations that are naturally coded in a problem-independent way and made available in reusable libraries~\cite{www:fenics,www:petsc,www:sundance,www:deal.II,www:trilinos,www:diffpack}. However, some parts of a finite element program are difficult to code in a problem-independent way. In particular, this includes the evaluation of the \emph{element tensor} (the `element stiffness matrix'), that is, the evaluation of the local contribution from a finite element to a global sparse tensor (the ``stiffness matrix'') representing a discretized differential operator. These parts must thus be implemented by the application programmer for each specific combination of differential equation and discretization (finite element spaces). 
\index{form compilers} \index{FFC} \index{SyFi} However, domain-specific compilers such as FFC~\cite{www:ffc,logg:article:07,logg:article:09,logg:article:10,logg:article:11} and SyFi~\cite{www:syfi} make it possible to automatically generate the code for the evaluation of the element tensor. These \emph{form compilers} accept as input a high-level description of a finite element variational form and generate low-level code code for efficient evaluation of the element tensor and associated quantities. It thus becomes important to specify the \emph{interface} between form compilers and finite element assemblers such that the code generated by FFC, SyFi and other form compilers can be used to assemble finite element matrices and vectors (and in general tensors). \section{Unified Form-assembly Code} UFC (Unified Form-assembly Code) is a unified framework for finite element assembly. More precisely, it defines a fixed interface for communicating low level routines (functions) for evaluating and assembling finite element variational forms. The UFC interface consists of a single header file \texttt{ufc.h} that specifies a C++ interface that must be implemented by code that complies with the UFC specification. Both FFC (since version 0.4.0) and SyFi (since version 0.3.4) generate code that complies with the UFC specification. Thus, code generated by FFC and SyFi may be used interchangeably by any UFC-based finite element assembler, such as DOLFIN~\cite{www:dolfin}. \section{Aim and scope} The UFC interface has been designed to make a minimal amount of assumptions on the form compilers generating the UFC code and the assemblers built on top of the UFC specification. Thus, the UFC specification provides a minimal amount of abstractions and data structures. Programmers wishing to implement the UFC specification will typically want to create system-specific (but simple) wrappers for the generated code. 
Few assumptions have also been made on the underlying finite element methodology. The current specification is limited to affinely mapped cells, but does not restrict the mapping of finite element function spaces. Thus, UFC code may be generated for elements where basis functions are transformed from the reference cell by the affine mapping, as well as for elements where the basis functions must be transformed by the Piola mapping. UFC code has been successfully generated and used in finite element codes for standard continuous Galerkin methods (Lagrange finite elements of arbitrary order), discontinuous Galerkin methods (including integrals of jumps and averages over interior facets) and mixed methods (including Brezzi--Douglas--Marini and Raviart--Thomas elements). \section{Outline} In the next section, we give an overview of finite element assembly and explain how the code generated by form compilers may be used as the basic building blocks in the assembly algorithm. We then present the UFC interface in detail in Section~\ref{sec:interface}. In Section~\ref{sec:referencecells} and Section~\ref{sec:numbering}, we define the reference cells and numbering conventions that must be followed by UFC-based form compilers and assemblers. ffc-1.6.0/ufc-merge-into-ffc/doc/manual/chapters/license.tex000066400000000000000000000002201255571034100236250ustar00rootroot00000000000000\chapter{License} \index{license} The UFC specification, and in particular the header file \texttt{ufc.h}, is released into the public domain. 
ffc-1.6.0/ufc-merge-into-ffc/doc/manual/chapters/numbering.tex000066400000000000000000000001361255571034100241770ustar00rootroot00000000000000\chapter{Numbering of mesh entities} \label{sec:numbering} \input{chapters/numbering_common} ffc-1.6.0/ufc-merge-into-ffc/doc/manual/chapters/numbering_common.tex000066400000000000000000000337701255571034100255610ustar00rootroot00000000000000\index{numbering} The UFC specification dictates a certain numbering of the vertices, edges etc. of the cells of a finite element mesh. First, an \emph{ad hoc} numbering is picked for the vertices of each cell. Then, the remaining entities are ordered based on a simple rule, as described in detail below. \section{Basic concepts} \index{mesh entity} \index{topological dimension} The topological entities of a cell (or mesh) are referred to as \emph{mesh entities}. A mesh entity can be identified by a pair $(d, i)$, where $d$ is the topological dimension of the mesh entity and $i$ is a unique index of the mesh entity. Mesh entities are numbered within each topological dimension from $0$ to $n_d-1$, where $n_d$ is the number of mesh entities of topological dimension $d$. For convenience, mesh entities of topological dimension $0$ are referred to as \emph{vertices}, entities of dimension $1$ as \emph{edges}, entities of dimension $2$ as \emph{faces}, entities of \emph{codimension} $1$ as \emph{facets} and entities of codimension $0$ as \emph{cells}. These concepts are summarized in Table~\ref{tab:entities}. Thus, the vertices of a tetrahedron are identified as $v_0 = (0, 0)$, $v_1 = (0, 1)$ and $v_2 = (0, 2)$, the edges are $e_0 = (1, 0)$, $e_1 = (1, 1)$, $e_2 = (1, 2)$, $e_3 = (1, 3)$, $e_4 = (1, 4)$ and $e_5 = (1, 5)$, the faces (facets) are $f_0 = (2, 0)$, $f_1 = (2, 1)$, $f_2 = (2, 2)$ and $f_3 = (2, 3)$, and the cell itself is $c_0 = (3, 0)$. 
\begin{table} \linespread{1.2}\selectfont \begin{center} \begin{tabular}{|l|c|c|} \hline Entity & Dimension & Codimension \\ \hline Vertex & $0$ & -- \\ Edge & $1$ & -- \\ Face & $2$ & -- \\ & & \\ Facet & -- & $1$ \\ Cell & -- & $0$ \\ \hline \end{tabular} \caption{Named mesh entities.} \label{tab:entities} \end{center} \end{table} \section{Numbering of vertices} \index{vertex numbering} For simplicial cells (intervals, triangles and tetrahedra) of a finite element mesh, the vertices are numbered locally based on the corresponding global vertex numbers. In particular, a tuple of increasing local vertex numbers corresponds to a tuple of increasing global vertex numbers. This is illustrated in Figure~\ref{fig:numbering_example_triangles} for a mesh consisting of two triangles. \begin{figure}[htbp] \begin{center} \psfrag{v0}{$v_0$} \psfrag{v1}{$v_1$} \psfrag{v2}{$v_2$} \psfrag{0}{$0$} \psfrag{1}{$1$} \psfrag{2}{$2$} \psfrag{3}{$3$} \includegraphics[width=8cm]{eps/numbering_example_triangles.eps} \caption{The vertices of a simplicial mesh are numbered locally based on the corresponding global vertex numbers.} \label{fig:numbering_example_triangles} \end{center} \end{figure} For non-simplicial cells (quadrilaterals and hexahedra), the numbering is arbitrary, as long as each cell is isomorphic to the corresponding reference cell by matching each vertex with the corresponding vertex in the reference cell. This is illustrated in Figure~\ref{fig:numbering_example_quadrilaterals} for a mesh consisting of two quadrilaterals. 
\begin{figure}[htbp] \begin{center} \psfrag{v0}{$v_0$} \psfrag{v1}{$v_1$} \psfrag{v2}{$v_2$} \psfrag{v3}{$v_3$} \psfrag{0}{$0$} \psfrag{1}{$1$} \psfrag{2}{$2$} \psfrag{3}{$3$} \psfrag{4}{$4$} \psfrag{5}{$5$} \includegraphics[width=8cm]{eps/numbering_example_quadrilaterals.eps} \caption{The local numbering of vertices of a non-simplicial mesh is arbitrary, as long as each cell is isomorphic to the reference cell by matching each vertex to the corresponding vertex of the reference cell.} \label{fig:numbering_example_quadrilaterals} \end{center} \end{figure} \section{Numbering of other mesh entities} When the vertices have been numbered, the remaining mesh entities are numbered within each topological dimension based on a \emph{lexicographical ordering} of the corresponding ordered tuples of \emph{non-incident vertices}. As an illustration, consider the numbering of edges (the mesh entities of topological dimension one) on the reference triangle in Figure~\ref{fig:orderingexample,triangle}. To number the edges of the reference triangle, we identify for each edge the corresponding non-incident vertices. For each edge, there is only one such vertex (the vertex opposite to the edge). We thus identify the three edges in the reference triangle with the tuples $(v_0)$, $(v_1)$ and $(v_2)$. The first of these is edge $e_0$ between vertices $v_1$ and $v_2$ opposite to vertex $v_0$, the second is edge $e_1$ between vertices $v_0$ and $v_2$ opposite to vertex $v_1$, and the third is edge $e_2$ between vertices $v_0$ and $v_1$ opposite to vertex $v_2$. Similarly, we identify the six edges of the reference tetrahedron with the corresponding non-incident tuples $(v_0, v_1)$, $(v_0, v_2)$, $(v_0, v_3)$, $(v_1, v_2)$, $(v_1, v_3)$ and $(v_2, v_3)$. The first of these is edge $e_0$ between vertices $v_2$ and $v_3$ opposite to vertices $v_0$ and $v_1$ as shown in Figure~\ref{fig:orderingexample,tetrahedron}. 
\begin{figure}[htbp] \begin{center} \psfrag{v0}{$v_0$} \psfrag{v1}{$v_1$} \psfrag{v2}{$v_2$} \psfrag{e0}{$e_0$} \includegraphics[width=5cm]{eps/ordering_example_triangle.eps} \caption{Mesh entities are ordered based on a lexicographical ordering of the corresponding ordered tuples of non-incident vertices. The first edge $e_0$ is non-incident to vertex $v_0$.} \label{fig:orderingexample,triangle} \end{center} \end{figure} \begin{figure}[htbp] \begin{center} \psfrag{v0}{$v_0$} \psfrag{v1}{$v_1$} \psfrag{v2}{$v_2$} \psfrag{v3}{$v_3$} \psfrag{e0}{$e_0$} \includegraphics[width=5cm]{eps/ordering_example_tetrahedron.eps} \caption{Mesh entities are ordered based on a lexicographical ordering of the corresponding ordered tuples of non-incident vertices. The first edge $e_0$ is non-incident to vertices $v_0$ and $v_1$.} \label{fig:orderingexample,tetrahedron} \end{center} \end{figure} \subsection{Relative ordering} The relative ordering of mesh entities with respect to other incident mesh entities follows by sorting the entities by their (global) indices. Thus, the pair of vertices incident to the first edge $e_0$ of a triangular cell is $(v_1, v_2)$, not $(v_2, v_1)$. Similarly, the first face $f_0$ of a tetrahedral cell is incident to vertices $(v_1, v_2, v_3)$. For simplicial cells, the relative ordering in combination with the convention of numbering the vertices locally based on global vertex indices means that two incident cells will always agree on the orientation of incident subsimplices. Thus, two incident triangles will agree on the orientation of the common edge and two incident tetrahedra will agree on the orientation of the common edge(s) and the orientation of the common face (if any). This is illustrated in Figure~\ref{fig:orientation_example_triangles} for two incident triangles sharing a common edge. 
\begin{figure}[htbp] \begin{center} \psfrag{v0}{$v_0$} \psfrag{v1}{$v_1$} \psfrag{v2}{$v_2$} \psfrag{v3}{$v_3$} \includegraphics[width=9cm]{eps/orientation_example_triangles.eps} \caption{Two incident triangles will always agree on the orientation of the common edge.} \label{fig:orientation_example_triangles} \end{center} \end{figure} \subsection{Limitations} The UFC specification is only concerned with the ordering of mesh entities with respect to entities of larger topological dimension. In other words, the UFC specification is only concerned with the ordering of incidence relations of the class $d - d'$ where $d > d'$. For example, the UFC specification is not concerned with the ordering of incidence relations of the class $0 - 1$, that is, the ordering of edges incident to vertices. \newpage \section{Numbering schemes for reference cells} The numbering scheme is demonstrated below for cells isomorphic to each of the five reference cells. \subsection{Numbering of mesh entities on intervals} \begin{minipage}{\textwidth} \linespread{1.2}\selectfont \begin{center} \begin{tabular}{|c|c|c|} \hline Entity & Incident vertices & Non-incident vertices \\ \hline \hline $v_0 = (0, 0)$ & $(v_0)$ & $(v_1)$ \\ \hline $v_1 = (0, 1)$ & $(v_1)$ & $(v_0)$ \\ \hline $c_0 = (1, 0)$ & $(v_0, v_1)$ & $\emptyset$ \\ \hline \end{tabular} \end{center} \end{minipage} \subsection{Numbering of mesh entities on triangular cells} % \begin{minipage}{\textwidth} \linespread{1.2}\selectfont \begin{center} \begin{tabular}{|c|c|c|} \hline Entity & Incident vertices & Non-incident vertices \\ \hline \hline $v_0 = (0, 0)$ & $(v_0)$ & $(v_1, v_2)$ \\ \hline $v_1 = (0, 1)$ & $(v_1)$ & $(v_0, v_2)$ \\ \hline $v_2 = (0, 2)$ & $(v_2)$ & $(v_0, v_1)$ \\ \hline $e_0 = (1, 0)$ & $(v_1, v_2)$ & $(v_0)$ \\ \hline $e_1 = (1, 1)$ & $(v_0, v_2)$ & $(v_1)$ \\ \hline $e_2 = (1, 2)$ & $(v_0, v_1)$ & $(v_2)$ \\ \hline $c_0 = (2, 0)$ & $(v_0, v_1, v_2)$ & $\emptyset$ \\ \hline \end{tabular} \end{center} 
\end{minipage} \subsection{Numbering of mesh entities on quadrilateral cells} % \begin{minipage}{\textwidth} \linespread{1.1}\selectfont \begin{center} \begin{tabular}{|c|c|c|} \hline Entity & Incident vertices & Non-incident vertices \\ \hline \hline $v_0 = (0, 0)$ & $(v_0)$ & $(v_1, v_2, v_3)$ \\ \hline $v_1 = (0, 1)$ & $(v_1)$ & $(v_0, v_2, v_3)$ \\ \hline $v_2 = (0, 2)$ & $(v_2)$ & $(v_0, v_1, v_3)$ \\ \hline $v_3 = (0, 3)$ & $(v_3)$ & $(v_0, v_1, v_2)$ \\ \hline $e_0 = (1, 0)$ & $(v_2, v_3)$ & $(v_0, v_1)$ \\ \hline $e_1 = (1, 1)$ & $(v_1, v_2)$ & $(v_0, v_3)$ \\ \hline $e_2 = (1, 2)$ & $(v_0, v_3)$ & $(v_1, v_2)$ \\ \hline $e_3 = (1, 3)$ & $(v_0, v_1)$ & $(v_2, v_3)$ \\ \hline $c_0 = (2, 0)$ & $(v_0, v_1, v_2, v_3)$ & $\emptyset$ \\ \hline \end{tabular} \end{center} \end{minipage} \subsection{Numbering of mesh entities on tetrahedral cells} % \begin{minipage}{\textwidth} \linespread{1.1}\selectfont \begin{center} \begin{tabular}{|c|c|c|} \hline Entity & Incident vertices & Non-incident vertices \\ \hline \hline $v_0 = (0, 0)$ & $(v_0)$ & $(v_1, v_2, v_3)$ \\ \hline $v_1 = (0, 1)$ & $(v_1)$ & $(v_0, v_2, v_3)$ \\ \hline $v_2 = (0, 2)$ & $(v_2)$ & $(v_0, v_1, v_3)$ \\ \hline $v_3 = (0, 3)$ & $(v_3)$ & $(v_0, v_1, v_2)$ \\ \hline $e_0 = (1, 0)$ & $(v_2, v_3)$ & $(v_0, v_1)$ \\ \hline $e_1 = (1, 1)$ & $(v_1, v_3)$ & $(v_0, v_2)$ \\ \hline $e_2 = (1, 2)$ & $(v_1, v_2)$ & $(v_0, v_3)$ \\ \hline $e_3 = (1, 3)$ & $(v_0, v_3)$ & $(v_1, v_2)$ \\ \hline $e_4 = (1, 4)$ & $(v_0, v_2)$ & $(v_1, v_3)$ \\ \hline $e_5 = (1, 5)$ & $(v_0, v_1)$ & $(v_2, v_3)$ \\ \hline $f_0 = (2, 0)$ & $(v_1, v_2, v_3)$ & $(v_0)$ \\ \hline $f_1 = (2, 1)$ & $(v_0, v_2, v_3)$ & $(v_1)$ \\ \hline $f_2 = (2, 2)$ & $(v_0, v_1, v_3)$ & $(v_2)$ \\ \hline $f_3 = (2, 3)$ & $(v_0, v_1, v_2)$ & $(v_3)$ \\ \hline $c_0 = (3, 0)$ & $(v_0, v_1, v_2, v_3)$ & $\emptyset$ \\ \hline \end{tabular} \end{center} \end{minipage} \vfill \newpage \subsection{Numbering of mesh entities on hexahedral cells} 
\begin{minipage}{\textwidth} \small \linespread{1.2}\selectfont \begin{center} \begin{tabular}{|c|c|c|} \hline Entity & Incident vertices & Non-incident vertices \\ \hline \hline $v_0 = (0, 0)$ & $(v_0)$ & $(v_1, v_2, v_3, v_4, v_5, v_6, v_7)$ \\ \hline $v_1 = (0, 1)$ & $(v_1)$ & $(v_0, v_2, v_3, v_4, v_5, v_6, v_7)$ \\ \hline $v_2 = (0, 2)$ & $(v_2)$ & $(v_0, v_1, v_3, v_4, v_5, v_6, v_7)$ \\ \hline $v_3 = (0, 3)$ & $(v_3)$ & $(v_0, v_1, v_2, v_4, v_5, v_6, v_7)$ \\ \hline $v_4 = (0, 4)$ & $(v_4)$ & $(v_0, v_1, v_2, v_3, v_5, v_6, v_7)$ \\ \hline $v_5 = (0, 5)$ & $(v_5)$ & $(v_0, v_1, v_2, v_3, v_4, v_6, v_7)$ \\ \hline $v_6 = (0, 6)$ & $(v_6)$ & $(v_0, v_1, v_2, v_3, v_4, v_5, v_7)$ \\ \hline $v_7 = (0, 7)$ & $(v_7)$ & $(v_0, v_1, v_2, v_3, v_4, v_5, v_6)$ \\ \hline $e_0 = (1, 0)$ & $(v_6, v_7)$ & $(v_0, v_1, v_2, v_3, v_4, v_5)$ \\ \hline $e_1 = (1, 1)$ & $(v_5, v_6)$ & $(v_0, v_1, v_2, v_3, v_4, v_7)$ \\ \hline $e_2 = (1, 2)$ & $(v_4, v_7)$ & $(v_0, v_1, v_2, v_3, v_5, v_6)$ \\ \hline $e_3 = (1, 3)$ & $(v_4, v_5)$ & $(v_0, v_1, v_2, v_3, v_6, v_7)$ \\ \hline $e_4 = (1, 4)$ & $(v_3, v_7)$ & $(v_0, v_1, v_2, v_4, v_5, v_6)$ \\ \hline $e_5 = (1, 5)$ & $(v_2, v_6)$ & $(v_0, v_1, v_3, v_4, v_5, v_7)$ \\ \hline $e_6 = (1, 6)$ & $(v_2, v_3)$ & $(v_0, v_1, v_4, v_5, v_6, v_7)$ \\ \hline $e_7 = (1, 7)$ & $(v_1, v_5)$ & $(v_0, v_2, v_3, v_4, v_6, v_7)$ \\ \hline $e_8 = (1, 8)$ & $(v_1, v_2)$ & $(v_0, v_3, v_4, v_5, v_6, v_7)$ \\ \hline $e_9 = (1, 9)$ & $(v_0, v_4)$ & $(v_1, v_2, v_3, v_5, v_6, v_7)$ \\ \hline $e_{10} = (1, 10)$ & $(v_0, v_3)$ & $(v_1, v_2, v_4, v_5, v_6, v_7)$ \\ \hline $e_{11} = (1, 11)$ & $(v_0, v_1)$ & $(v_2, v_3, v_4, v_5, v_6, v_7)$ \\ \hline $f_0 = (2, 0)$ & $(v_4, v_5, v_6, v_7)$ & $(v_0, v_1, v_2, v_3)$ \\ \hline $f_1 = (2, 1)$ & $(v_2, v_3, v_6, v_7)$ & $(v_0, v_1, v_4, v_5)$ \\ \hline $f_2 = (2, 2)$ & $(v_1, v_2, v_5, v_6)$ & $(v_0, v_3, v_4, v_7)$ \\ \hline $f_3 = (2, 3)$ & $(v_0, v_3, v_4, v_7)$ & $(v_1, v_2, v_5, v_6)$ \\ \hline $f_4 = (2, 
4)$ & $(v_0, v_1, v_4, v_5)$ & $(v_2, v_3, v_6, v_7)$ \\ \hline $f_5 = (2, 5)$ & $(v_0, v_1, v_2, v_3)$ & $(v_4, v_5, v_6, v_7)$ \\ \hline $c_0 = (3, 0)$ & $(v_0, v_1, v_2, v_3, v_4, v_5, v_6, v_7)$ & $\emptyset$ \\ \hline \end{tabular} \end{center} \end{minipage} ffc-1.6.0/ufc-merge-into-ffc/doc/manual/chapters/pythonutils.tex000066400000000000000000000027271255571034100246230ustar00rootroot00000000000000\chapter{Python utilities} \index{Python utilities} \index{code generation} The UFC distribution includes a set of Python utilities for generating code that conforms to the UFC specification. These utilities consist of format string templates for C++ header files (\texttt{.h} files), implementation files (\texttt{.cpp}) and combined header and implementation files (\texttt{.h} files containing both the declaration and definition of the UFC functions). The following format strings are provided: \begin{code} function_combined_{header, implementation, combined} finite_element_{header, implementation, combined} dofmap_{header, implementation, combined} cell_integral_{header, implementation, combined} exterior_facet_integral_{header, implementation, combined} interior_facet_integral_{header, implementation, combined} form_{header, implementation, combined} \end{code} We demonstrate below how to use the format string \texttt{form\_combined} together with a dictionary that specifies the code to be inserted into the format string. Typically, a form compiler will first generate the code to be inserted into the dictionary and then in a later stage write the generated code to file in UFC format using the provided format strings. \begin{code} from ufc import form_combined code = {} code["classname"] = "Poisson", ... code["rank"] = " return 2;", code["num_coefficients"] = " return 0;", code["num_cell_domains"] = " return 1;", ... 
print form_combined % code \end{code} ffc-1.6.0/ufc-merge-into-ffc/doc/manual/chapters/referencecells.tex000066400000000000000000000001351255571034100251710ustar00rootroot00000000000000\chapter{Reference cells} \label{sec:referencecells} \input{chapters/referencecells_common} ffc-1.6.0/ufc-merge-into-ffc/doc/manual/chapters/referencecells_common.tex000066400000000000000000000141431255571034100265450ustar00rootroot00000000000000\index{reference cells} The following five reference cells are covered by the UFC specification: the reference \emph{interval}, the reference \emph{triangle}, the reference \emph{quadrilateral}, the reference \emph{tetrahedron} and the reference \emph{hexahedron} (see Table~\ref{tab:ufc_reference_cells}). \begin{table} \linespread{1.2}\selectfont \begin{center} \begin{tabular}{|l|c|c|c|} \hline Reference cell & Dimension & \#Vertices & \#Facets \\ \hline \hline The reference interval & 1 & 2 & 2 \\ \hline The reference triangle & 2 & 3 & 3 \\ \hline The reference quadrilateral & 2 & 4 & 4 \\ \hline The reference tetrahedron & 3 & 4 & 4 \\ \hline The reference hexahedron & 3 & 8 & 6 \\ \hline \end{tabular} \caption{Reference cells covered by the UFC specification.} \label{tab:ufc_reference_cells} \end{center} \end{table} The UFC specification assumes that each cell in a finite element mesh is always isomorphic to one of the reference cells. \section{The reference interval} \index{interval} The reference interval is shown in Figure~\ref{fig:interval} and is defined by its two vertices with coordinates as specified in Table~\ref{tab:interval,vertices}. 
\begin{figure} \begin{center} \psfrag{0}{$0$} \psfrag{1}{$1$} \includegraphics[width=10cm]{eps/interval.eps} \caption{The reference interval.} \label{fig:interval} \end{center} \end{figure} \begin{table} \linespread{1.2}\selectfont \begin{center} \begin{tabular}{|c|c|} \hline Vertex & Coordinate \\ \hline \hline $v_0$ & $x = 0$ \\ \hline $v_1$ & $x = 1$ \\ \hline \end{tabular} \caption{Vertex coordinates of the reference interval.} \label{tab:interval,vertices} \end{center} \end{table} \section{The reference triangle} \index{triangle} The reference triangle is shown in Figure~\ref{fig:triangle} and is defined by its three vertices with coordinates as specified in Table~\ref{tab:triangle,vertices}. \begin{figure} \begin{center} \psfrag{v0}{$(0, 0)$} \psfrag{v1}{$(1, 0)$} \psfrag{v2}{$(0, 1)$} \includegraphics[width=8cm]{eps/triangle.eps} \caption{The reference triangle.} \label{fig:triangle} \end{center} \end{figure} \begin{table} \linespread{1.2}\selectfont \begin{center} \begin{tabular}{|c|c|} \hline Vertex & Coordinate \\ \hline \hline $v_0$ & $x = (0, 0)$ \\ \hline $v_1$ & $x = (1, 0)$ \\ \hline $v_2$ & $x = (0, 1)$ \\ \hline \end{tabular} \caption{Vertex coordinates of the reference triangle.} \label{tab:triangle,vertices} \end{center} \end{table} \section{The reference quadrilateral} \index{quadrilateral} The reference quadrilateral is shown in Figure~\ref{fig:quadrilateral} and is defined by its four vertices with coordinates as specified in Table~\ref{tab:quadrilateral,vertices}. 
\begin{figure} \begin{center} \psfrag{v0}{$(0, 0)$} \psfrag{v1}{$(1, 0)$} \psfrag{v2}{$(1, 1)$} \psfrag{v3}{$(0, 1)$} \includegraphics[width=8cm]{eps/quadrilateral.eps} \caption{The reference quadrilateral.} \label{fig:quadrilateral} \end{center} \end{figure} \begin{table} \linespread{1.2}\selectfont \begin{center} \begin{tabular}{|c|c|} \hline Vertex & Coordinate \\ \hline \hline $v_0$ & $x = (0, 0)$ \\ \hline $v_1$ & $x = (1, 0)$ \\ \hline $v_2$ & $x = (1, 1)$ \\ \hline $v_3$ & $x = (0, 1)$ \\ \hline \end{tabular} \caption{Vertex coordinates of the reference quadrilateral.} \label{tab:quadrilateral,vertices} \end{center} \end{table} \section{The reference tetrahedron} \index{tetrahedron} The reference tetrahedron is shown in Figure~\ref{fig:tetrahedron} and is defined by its four vertices with coordinates as specified in Table~\ref{tab:tetrahedron,vertices}. \begin{figure} \begin{center} \psfrag{v0}{$(0, 0, 0)$} \psfrag{v1}{$(1, 0, 0)$} \psfrag{v2}{$(0, 1, 0)$} \psfrag{v3}{$(0, 0, 1)$} \includegraphics[width=6cm]{eps/tetrahedron.eps} \caption{The reference tetrahedron.} \label{fig:tetrahedron} \end{center} \end{figure} \begin{table} \linespread{1.2}\selectfont \begin{center} \begin{tabular}{|c|c|} \hline Vertex & Coordinate \\ \hline \hline $v_0$ & $x = (0, 0, 0)$ \\ \hline $v_1$ & $x = (1, 0, 0)$ \\ \hline $v_2$ & $x = (0, 1, 0)$ \\ \hline $v_3$ & $x = (0, 0, 1)$ \\ \hline \end{tabular} \caption{Vertex coordinates of the reference tetrahedron.} \label{tab:tetrahedron,vertices} \end{center} \end{table} \section{The reference hexahedron} \index{hexahedron} The reference hexahedron is shown in Figure~\ref{fig:hexahedron} and is defined by its eight vertices with coordinates as specified in Table~\ref{tab:hexahedron,vertices}. 
\begin{figure} \linespread{1.2}\selectfont \begin{center} \psfrag{v0}{$(0, 0, 0)$} \psfrag{v1}{$(1, 0, 0)$} \psfrag{v2}{$(1, 1, 0)$} \psfrag{v3}{$(0, 1, 0)$} \psfrag{v4}{$(0, 0, 1)$} \psfrag{v5}{$(1, 0, 1)$} \psfrag{v6}{$(1, 1, 1)$} \psfrag{v7}{$(0, 1, 1)$} \includegraphics[width=9cm]{eps/hexahedron.eps} \caption{The reference hexahedron.} \label{fig:hexahedron} \end{center} \end{figure} \begin{table} \linespread{1.2}\selectfont \begin{center} \begin{tabular}{|c|c|} \hline Vertex & Coordinate \\ \hline \hline $v_0$ & $x = (0, 0, 0)$ \\ \hline $v_1$ & $x = (1, 0, 0)$ \\ \hline $v_2$ & $x = (1, 1, 0)$ \\ \hline $v_3$ & $x = (0, 1, 0)$ \\ \hline \end{tabular} \begin{tabular}{|c|c|} \hline Vertex & Coordinate \\ \hline \hline $v_4$ & $x = (0, 0, 1)$ \\ \hline $v_5$ & $x = (1, 0, 1)$ \\ \hline $v_6$ & $x = (1, 1, 1)$ \\ \hline $v_7$ & $x = (0, 1, 1)$ \\ \hline \end{tabular} \caption{Vertex coordinates of the reference hexahedron.} \label{tab:hexahedron,vertices} \end{center} \end{table} ffc-1.6.0/ufc-merge-into-ffc/doc/manual/chapters/versions.tex000066400000000000000000000055311255571034100240650ustar00rootroot00000000000000\chapter{\ufc{} versions} \label{app:versions} \index{versions} To keep things simple, the UFC classes do not have any run time version control. To upgrade to a new UFC version, all libraries and applications must therefore be recompiled with the new header file \texttt{ufc.h}. \section{Version 1.0} Initial release. 
\section{Version 1.1} The following six functions have been added to the interface: \begin{itemize} \item \texttt{ufc::finite\_element::evaluate\_dofs} \item \texttt{ufc::finite\_element::evaluate\_basis\_all} \item \texttt{ufc::finite\_element::evaluate\_basis\_derivatives\_all} \item \texttt{ufc::dof\_map::geometric\_dimension} \item \texttt{ufc::dof\_map::num\_entity\_dofs} \item \texttt{ufc::dof\_map::tabulate\_entity\_dofs} \end{itemize} An implementation of UFC version 1.0 can be recompiled with the header file from UFC version 1.1 without changes to the source code. The new functions introduced in 1.1 will then simply throw an informative exception. (The new functions are virtual but not pure virtual.) \section{Version 1.2} The following functions have been modified: \begin{itemize} \item \texttt{ufc::dof\_map::local\_dimension} \end{itemize} The following functions have been added to the interface: \begin{itemize} \item \texttt{ufc::dof\_map::max\_local\_dimension} \end{itemize} \section{Version 1.4} The behavior of the functions \texttt{ufc::form::create\_cell\_integral}, \texttt{ufc::form::create\_exterior\_facet\_integral} and \texttt{ufc::form::create\_interior\_facet\_integral} were changed to return a zero pointer when there is no integral on the given domain. 
\section{Version 2.0} The following names have been changed: \begin{itemize} \item \texttt{ufc::dof\_map} $\rightarrow$ \texttt{ufc::dofmap} \item \texttt{ufc::form::num\_cell\_integrals} $\rightarrow$ \texttt{ufc::form::num\_cell\_domains} \item \texttt{ufc::form::num\_exterior\_facet\_integrals} $\rightarrow$ \texttt{ufc::form::num\_exterior\_facet\_domains} \item \texttt{ufc::form::num\_interior\_facet\_integrals} $\rightarrow$ \texttt{ufc::form::num\_interior\_facet\_domains} \end{itemize} The following new data members have been added: \begin{itemize} \item \texttt{ufc::cell:index} \item \texttt{ufc::cell:local\_facet} \item \texttt{ufc::cell:mesh\_identifier} \end{itemize} The following new functions have been added: \begin{itemize} \item \texttt{ufc::finite\_element::topological\_dimension} \item \texttt{ufc::finite\_element::geometric\_dimension} \item \texttt{ufc::finite\_element::create} \item \texttt{ufc::dofmap::topological\_dimension} \item \texttt{ufc::dofmap::create} \item \texttt{ufc::cell\_integral::tabulate\_tensor} (quadrature version) \item \texttt{ufc::exterior\_facet\_integral::tabulate\_tensor} (quadrature version) \item \texttt{ufc::interior\_integral::tabulate\_tensor} (quadrature version) \end{itemize} ffc-1.6.0/ufc-merge-into-ffc/doc/manual/code/000077500000000000000000000000001255571034100205705ustar00rootroot00000000000000ffc-1.6.0/ufc-merge-into-ffc/doc/manual/code/Poisson.ufl000066400000000000000000000006061255571034100227340ustar00rootroot00000000000000# Copyright (C) 2004-2007 Anders Logg (logg@simula.no) # Licensed under the GNU GPL Version 2 # # The bilinear form a(v, u) and linear form L(v) for # Poisson's equation. 
# # Compile this form with FFC: ffc Poisson.form element = FiniteElement("Lagrange", "triangle", 1) v = TestFunction(element) u = TrialFunction(element) f = Function(element) a = dot(grad(v), grad(u))*dx #L = v*f*dx ffc-1.6.0/ufc-merge-into-ffc/doc/manual/code/poisson_ffc.h000066400000000000000000001257241255571034100232640ustar00rootroot00000000000000// This code conforms with the UFC specification version 1.0 // and was automatically generated by FFC version 0.6.2. #ifndef __POISSON_H #define __POISSON_H #include #include #include /// This class defines the interface for a finite element. class PoissonBilinearForm_finite_element_0: public ufc::finite_element { public: /// Constructor PoissonBilinearForm_finite_element_0() : ufc::finite_element() { // Do nothing } /// Destructor virtual ~PoissonBilinearForm_finite_element_0() { // Do nothing } /// Return a string identifying the finite element virtual const char* signature() const { return "FiniteElement('Lagrange', 'triangle', 1)"; } /// Return the cell shape virtual ufc::shape cell_shape() const { return ufc::triangle; } /// Return the dimension of the finite element function space virtual unsigned int space_dimension() const { return 3; } /// Return the rank of the value space virtual unsigned int value_rank() const { return 0; } /// Return the dimension of the value space for axis i virtual unsigned int value_dimension(unsigned int i) const { return 1; } /// Evaluate basis function i at given point in cell virtual void evaluate_basis(unsigned int i, double* values, const double* coordinates, const ufc::cell& c) const { // Extract vertex coordinates const double * const * element_coordinates = c.coordinates; // Compute Jacobian of affine map from reference cell const double J_00 = element_coordinates[1][0] - element_coordinates[0][0]; const double J_01 = element_coordinates[2][0] - element_coordinates[0][0]; const double J_10 = element_coordinates[1][1] - element_coordinates[0][1]; const double J_11 = 
element_coordinates[2][1] - element_coordinates[0][1]; // Compute determinant of Jacobian const double detJ = J_00*J_11 - J_01*J_10; // Compute inverse of Jacobian // Get coordinates and map to the reference (UFC) element double x = (element_coordinates[0][1]*element_coordinates[2][0] -\ element_coordinates[0][0]*element_coordinates[2][1] +\ J_11*coordinates[0] - J_01*coordinates[1]) / detJ; double y = (element_coordinates[1][1]*element_coordinates[0][0] -\ element_coordinates[1][0]*element_coordinates[0][1] -\ J_10*coordinates[0] + J_00*coordinates[1]) / detJ; // Map coordinates to the reference square if (std::abs(y - 1.0) < 1e-14) x = -1.0; else x = 2.0 *x/(1.0 - y) - 1.0; y = 2.0*y - 1.0; // Reset values *values = 0; // Map degree of freedom to element degree of freedom const unsigned int dof = i; // Generate scalings const double scalings_y_0 = 1; const double scalings_y_1 = scalings_y_0*(0.5 - 0.5*y); // Compute psitilde_a const double psitilde_a_0 = 1; const double psitilde_a_1 = x; // Compute psitilde_bs const double psitilde_bs_0_0 = 1; const double psitilde_bs_0_1 = 1.5*y + 0.5; const double psitilde_bs_1_0 = 1; // Compute basisvalues const double basisvalue0 = 0.707106781186548*psitilde_a_0*scalings_y_0*psitilde_bs_0_0; const double basisvalue1 = 1.73205080756888*psitilde_a_1*scalings_y_1*psitilde_bs_1_0; const double basisvalue2 = psitilde_a_0*scalings_y_0*psitilde_bs_0_1; // Table(s) of coefficients const static double coefficients0[3][3] = \ {{0.471404520791032, -0.288675134594813, -0.166666666666667}, {0.471404520791032, 0.288675134594813, -0.166666666666667}, {0.471404520791032, 0, 0.333333333333333}}; // Extract relevant coefficients const double coeff0_0 = coefficients0[dof][0]; const double coeff0_1 = coefficients0[dof][1]; const double coeff0_2 = coefficients0[dof][2]; // Compute value(s) *values = coeff0_0*basisvalue0 + coeff0_1*basisvalue1 + coeff0_2*basisvalue2; } /// Evaluate all basis functions at given point in cell virtual void 
evaluate_basis_all(double* values, const double* coordinates, const ufc::cell& c) const { throw std::runtime_error("The vectorised version of evaluate_basis() is not yet implemented."); } /// Evaluate order n derivatives of basis function i at given point in cell virtual void evaluate_basis_derivatives(unsigned int i, unsigned int n, double* values, const double* coordinates, const ufc::cell& c) const { // Extract vertex coordinates const double * const * element_coordinates = c.coordinates; // Compute Jacobian of affine map from reference cell const double J_00 = element_coordinates[1][0] - element_coordinates[0][0]; const double J_01 = element_coordinates[2][0] - element_coordinates[0][0]; const double J_10 = element_coordinates[1][1] - element_coordinates[0][1]; const double J_11 = element_coordinates[2][1] - element_coordinates[0][1]; // Compute determinant of Jacobian const double detJ = J_00*J_11 - J_01*J_10; // Compute inverse of Jacobian // Get coordinates and map to the reference (UFC) element double x = (element_coordinates[0][1]*element_coordinates[2][0] -\ element_coordinates[0][0]*element_coordinates[2][1] +\ J_11*coordinates[0] - J_01*coordinates[1]) / detJ; double y = (element_coordinates[1][1]*element_coordinates[0][0] -\ element_coordinates[1][0]*element_coordinates[0][1] -\ J_10*coordinates[0] + J_00*coordinates[1]) / detJ; // Map coordinates to the reference square if (std::abs(y - 1.0) < 1e-14) x = -1.0; else x = 2.0 *x/(1.0 - y) - 1.0; y = 2.0*y - 1.0; // Compute number of derivatives unsigned int num_derivatives = 1; for (unsigned int j = 0; j < n; j++) num_derivatives *= 2; // Declare pointer to two dimensional array that holds combinations of derivatives and initialise unsigned int **combinations = new unsigned int *[num_derivatives]; for (unsigned int j = 0; j < num_derivatives; j++) { combinations[j] = new unsigned int [n]; for (unsigned int k = 0; k < n; k++) combinations[j][k] = 0; } // Generate combinations of derivatives for (unsigned 
int row = 1; row < num_derivatives; row++) { for (unsigned int num = 0; num < row; num++) { for (unsigned int col = n-1; col+1 > 0; col--) { if (combinations[row][col] + 1 > 1) combinations[row][col] = 0; else { combinations[row][col] += 1; break; } } } } // Compute inverse of Jacobian const double Jinv[2][2] = {{J_11 / detJ, -J_01 / detJ}, {-J_10 / detJ, J_00 / detJ}}; // Declare transformation matrix // Declare pointer to two dimensional array and initialise double **transform = new double *[num_derivatives]; for (unsigned int j = 0; j < num_derivatives; j++) { transform[j] = new double [num_derivatives]; for (unsigned int k = 0; k < num_derivatives; k++) transform[j][k] = 1; } // Construct transformation matrix for (unsigned int row = 0; row < num_derivatives; row++) { for (unsigned int col = 0; col < num_derivatives; col++) { for (unsigned int k = 0; k < n; k++) transform[row][col] *= Jinv[combinations[col][k]][combinations[row][k]]; } } // Reset values for (unsigned int j = 0; j < 1*num_derivatives; j++) values[j] = 0; // Map degree of freedom to element degree of freedom const unsigned int dof = i; // Generate scalings const double scalings_y_0 = 1; const double scalings_y_1 = scalings_y_0*(0.5 - 0.5*y); // Compute psitilde_a const double psitilde_a_0 = 1; const double psitilde_a_1 = x; // Compute psitilde_bs const double psitilde_bs_0_0 = 1; const double psitilde_bs_0_1 = 1.5*y + 0.5; const double psitilde_bs_1_0 = 1; // Compute basisvalues const double basisvalue0 = 0.707106781186548*psitilde_a_0*scalings_y_0*psitilde_bs_0_0; const double basisvalue1 = 1.73205080756888*psitilde_a_1*scalings_y_1*psitilde_bs_1_0; const double basisvalue2 = psitilde_a_0*scalings_y_0*psitilde_bs_0_1; // Table(s) of coefficients const static double coefficients0[3][3] = \ {{0.471404520791032, -0.288675134594813, -0.166666666666667}, {0.471404520791032, 0.288675134594813, -0.166666666666667}, {0.471404520791032, 0, 0.333333333333333}}; // Interesting (new) part // Tables of 
derivatives of the polynomial base (transpose) const static double dmats0[3][3] = \ {{0, 0, 0}, {4.89897948556636, 0, 0}, {0, 0, 0}}; const static double dmats1[3][3] = \ {{0, 0, 0}, {2.44948974278318, 0, 0}, {4.24264068711928, 0, 0}}; // Compute reference derivatives // Declare pointer to array of derivatives on FIAT element double *derivatives = new double [num_derivatives]; // Declare coefficients double coeff0_0 = 0; double coeff0_1 = 0; double coeff0_2 = 0; // Declare new coefficients double new_coeff0_0 = 0; double new_coeff0_1 = 0; double new_coeff0_2 = 0; // Loop possible derivatives for (unsigned int deriv_num = 0; deriv_num < num_derivatives; deriv_num++) { // Get values from coefficients array new_coeff0_0 = coefficients0[dof][0]; new_coeff0_1 = coefficients0[dof][1]; new_coeff0_2 = coefficients0[dof][2]; // Loop derivative order for (unsigned int j = 0; j < n; j++) { // Update old coefficients coeff0_0 = new_coeff0_0; coeff0_1 = new_coeff0_1; coeff0_2 = new_coeff0_2; if(combinations[deriv_num][j] == 0) { new_coeff0_0 = coeff0_0*dmats0[0][0] + coeff0_1*dmats0[1][0] + coeff0_2*dmats0[2][0]; new_coeff0_1 = coeff0_0*dmats0[0][1] + coeff0_1*dmats0[1][1] + coeff0_2*dmats0[2][1]; new_coeff0_2 = coeff0_0*dmats0[0][2] + coeff0_1*dmats0[1][2] + coeff0_2*dmats0[2][2]; } if(combinations[deriv_num][j] == 1) { new_coeff0_0 = coeff0_0*dmats1[0][0] + coeff0_1*dmats1[1][0] + coeff0_2*dmats1[2][0]; new_coeff0_1 = coeff0_0*dmats1[0][1] + coeff0_1*dmats1[1][1] + coeff0_2*dmats1[2][1]; new_coeff0_2 = coeff0_0*dmats1[0][2] + coeff0_1*dmats1[1][2] + coeff0_2*dmats1[2][2]; } } // Compute derivatives on reference element as dot product of coefficients and basisvalues derivatives[deriv_num] = new_coeff0_0*basisvalue0 + new_coeff0_1*basisvalue1 + new_coeff0_2*basisvalue2; } // Transform derivatives back to physical element for (unsigned int row = 0; row < num_derivatives; row++) { for (unsigned int col = 0; col < num_derivatives; col++) { values[row] += 
transform[row][col]*derivatives[col]; } } // Delete pointer to array of derivatives on FIAT element delete [] derivatives; // Delete pointer to array of combinations of derivatives and transform for (unsigned int row = 0; row < num_derivatives; row++) { delete [] combinations[row]; delete [] transform[row]; } delete [] combinations; delete [] transform; } /// Evaluate order n derivatives of all basis functions at given point in cell virtual void evaluate_basis_derivatives_all(unsigned int n, double* values, const double* coordinates, const ufc::cell& c) const { throw std::runtime_error("The vectorised version of evaluate_basis_derivatives() is not yet implemented."); } /// Evaluate linear functional for dof i on the function f virtual double evaluate_dof(unsigned int i, const ufc::function& f, const ufc::cell& c) const { // The reference points, direction and weights: const static double X[3][1][2] = {{{0, 0}}, {{1, 0}}, {{0, 1}}}; const static double W[3][1] = {{1}, {1}, {1}}; const static double D[3][1][1] = {{{1}}, {{1}}, {{1}}}; const double * const * x = c.coordinates; double result = 0.0; // Iterate over the points: // Evaluate basis functions for affine mapping const double w0 = 1.0 - X[i][0][0] - X[i][0][1]; const double w1 = X[i][0][0]; const double w2 = X[i][0][1]; // Compute affine mapping y = F(X) double y[2]; y[0] = w0*x[0][0] + w1*x[1][0] + w2*x[2][0]; y[1] = w0*x[0][1] + w1*x[1][1] + w2*x[2][1]; // Evaluate function at physical points double values[1]; f.evaluate(values, y, c); // Map function values using appropriate mapping // Affine map: Do nothing // Note that we do not map the weights (yet). 
// Take directional components for(int k = 0; k < 1; k++) result += values[k]*D[i][0][k]; // Multiply by weights result *= W[i][0]; return result; } /// Evaluate linear functionals for all dofs on the function f virtual void evaluate_dofs(double* values, const ufc::function& f, const ufc::cell& c) const { throw std::runtime_error("Not implemented (introduced in UFC v1.1)."); } /// Interpolate vertex values from dof values virtual void interpolate_vertex_values(double* vertex_values, const double* dof_values, const ufc::cell& c) const { // Evaluate at vertices and use affine mapping vertex_values[0] = dof_values[0]; vertex_values[1] = dof_values[1]; vertex_values[2] = dof_values[2]; } /// Return the number of sub elements (for a mixed element) virtual unsigned int num_sub_elements() const { return 1; } /// Create a new finite element for sub element i (for a mixed element) virtual ufc::finite_element* create_sub_element(unsigned int i) const { return new PoissonBilinearForm_finite_element_0(); } }; /// This class defines the interface for a finite element. 
class PoissonBilinearForm_finite_element_1: public ufc::finite_element { public: /// Constructor PoissonBilinearForm_finite_element_1() : ufc::finite_element() { // Do nothing } /// Destructor virtual ~PoissonBilinearForm_finite_element_1() { // Do nothing } /// Return a string identifying the finite element virtual const char* signature() const { return "FiniteElement('Lagrange', 'triangle', 1)"; } /// Return the cell shape virtual ufc::shape cell_shape() const { return ufc::triangle; } /// Return the dimension of the finite element function space virtual unsigned int space_dimension() const { return 3; } /// Return the rank of the value space virtual unsigned int value_rank() const { return 0; } /// Return the dimension of the value space for axis i virtual unsigned int value_dimension(unsigned int i) const { return 1; } /// Evaluate basis function i at given point in cell virtual void evaluate_basis(unsigned int i, double* values, const double* coordinates, const ufc::cell& c) const { // Extract vertex coordinates const double * const * element_coordinates = c.coordinates; // Compute Jacobian of affine map from reference cell const double J_00 = element_coordinates[1][0] - element_coordinates[0][0]; const double J_01 = element_coordinates[2][0] - element_coordinates[0][0]; const double J_10 = element_coordinates[1][1] - element_coordinates[0][1]; const double J_11 = element_coordinates[2][1] - element_coordinates[0][1]; // Compute determinant of Jacobian const double detJ = J_00*J_11 - J_01*J_10; // Compute inverse of Jacobian // Get coordinates and map to the reference (UFC) element double x = (element_coordinates[0][1]*element_coordinates[2][0] -\ element_coordinates[0][0]*element_coordinates[2][1] +\ J_11*coordinates[0] - J_01*coordinates[1]) / detJ; double y = (element_coordinates[1][1]*element_coordinates[0][0] -\ element_coordinates[1][0]*element_coordinates[0][1] -\ J_10*coordinates[0] + J_00*coordinates[1]) / detJ; // Map coordinates to the reference 
square if (std::abs(y - 1.0) < 1e-14) x = -1.0; else x = 2.0 *x/(1.0 - y) - 1.0; y = 2.0*y - 1.0; // Reset values *values = 0; // Map degree of freedom to element degree of freedom const unsigned int dof = i; // Generate scalings const double scalings_y_0 = 1; const double scalings_y_1 = scalings_y_0*(0.5 - 0.5*y); // Compute psitilde_a const double psitilde_a_0 = 1; const double psitilde_a_1 = x; // Compute psitilde_bs const double psitilde_bs_0_0 = 1; const double psitilde_bs_0_1 = 1.5*y + 0.5; const double psitilde_bs_1_0 = 1; // Compute basisvalues const double basisvalue0 = 0.707106781186548*psitilde_a_0*scalings_y_0*psitilde_bs_0_0; const double basisvalue1 = 1.73205080756888*psitilde_a_1*scalings_y_1*psitilde_bs_1_0; const double basisvalue2 = psitilde_a_0*scalings_y_0*psitilde_bs_0_1; // Table(s) of coefficients const static double coefficients0[3][3] = \ {{0.471404520791032, -0.288675134594813, -0.166666666666667}, {0.471404520791032, 0.288675134594813, -0.166666666666667}, {0.471404520791032, 0, 0.333333333333333}}; // Extract relevant coefficients const double coeff0_0 = coefficients0[dof][0]; const double coeff0_1 = coefficients0[dof][1]; const double coeff0_2 = coefficients0[dof][2]; // Compute value(s) *values = coeff0_0*basisvalue0 + coeff0_1*basisvalue1 + coeff0_2*basisvalue2; } /// Evaluate all basis functions at given point in cell virtual void evaluate_basis_all(double* values, const double* coordinates, const ufc::cell& c) const { throw std::runtime_error("The vectorised version of evaluate_basis() is not yet implemented."); } /// Evaluate order n derivatives of basis function i at given point in cell virtual void evaluate_basis_derivatives(unsigned int i, unsigned int n, double* values, const double* coordinates, const ufc::cell& c) const { // Extract vertex coordinates const double * const * element_coordinates = c.coordinates; // Compute Jacobian of affine map from reference cell const double J_00 = element_coordinates[1][0] - 
element_coordinates[0][0]; const double J_01 = element_coordinates[2][0] - element_coordinates[0][0]; const double J_10 = element_coordinates[1][1] - element_coordinates[0][1]; const double J_11 = element_coordinates[2][1] - element_coordinates[0][1]; // Compute determinant of Jacobian const double detJ = J_00*J_11 - J_01*J_10; // Compute inverse of Jacobian // Get coordinates and map to the reference (UFC) element double x = (element_coordinates[0][1]*element_coordinates[2][0] -\ element_coordinates[0][0]*element_coordinates[2][1] +\ J_11*coordinates[0] - J_01*coordinates[1]) / detJ; double y = (element_coordinates[1][1]*element_coordinates[0][0] -\ element_coordinates[1][0]*element_coordinates[0][1] -\ J_10*coordinates[0] + J_00*coordinates[1]) / detJ; // Map coordinates to the reference square if (std::abs(y - 1.0) < 1e-14) x = -1.0; else x = 2.0 *x/(1.0 - y) - 1.0; y = 2.0*y - 1.0; // Compute number of derivatives unsigned int num_derivatives = 1; for (unsigned int j = 0; j < n; j++) num_derivatives *= 2; // Declare pointer to two dimensional array that holds combinations of derivatives and initialise unsigned int **combinations = new unsigned int *[num_derivatives]; for (unsigned int j = 0; j < num_derivatives; j++) { combinations[j] = new unsigned int [n]; for (unsigned int k = 0; k < n; k++) combinations[j][k] = 0; } // Generate combinations of derivatives for (unsigned int row = 1; row < num_derivatives; row++) { for (unsigned int num = 0; num < row; num++) { for (unsigned int col = n-1; col+1 > 0; col--) { if (combinations[row][col] + 1 > 1) combinations[row][col] = 0; else { combinations[row][col] += 1; break; } } } } // Compute inverse of Jacobian const double Jinv[2][2] = {{J_11 / detJ, -J_01 / detJ}, {-J_10 / detJ, J_00 / detJ}}; // Declare transformation matrix // Declare pointer to two dimensional array and initialise double **transform = new double *[num_derivatives]; for (unsigned int j = 0; j < num_derivatives; j++) { transform[j] = new double 
[num_derivatives]; for (unsigned int k = 0; k < num_derivatives; k++) transform[j][k] = 1; } // Construct transformation matrix for (unsigned int row = 0; row < num_derivatives; row++) { for (unsigned int col = 0; col < num_derivatives; col++) { for (unsigned int k = 0; k < n; k++) transform[row][col] *= Jinv[combinations[col][k]][combinations[row][k]]; } } // Reset values for (unsigned int j = 0; j < 1*num_derivatives; j++) values[j] = 0; // Map degree of freedom to element degree of freedom const unsigned int dof = i; // Generate scalings const double scalings_y_0 = 1; const double scalings_y_1 = scalings_y_0*(0.5 - 0.5*y); // Compute psitilde_a const double psitilde_a_0 = 1; const double psitilde_a_1 = x; // Compute psitilde_bs const double psitilde_bs_0_0 = 1; const double psitilde_bs_0_1 = 1.5*y + 0.5; const double psitilde_bs_1_0 = 1; // Compute basisvalues const double basisvalue0 = 0.707106781186548*psitilde_a_0*scalings_y_0*psitilde_bs_0_0; const double basisvalue1 = 1.73205080756888*psitilde_a_1*scalings_y_1*psitilde_bs_1_0; const double basisvalue2 = psitilde_a_0*scalings_y_0*psitilde_bs_0_1; // Table(s) of coefficients const static double coefficients0[3][3] = \ {{0.471404520791032, -0.288675134594813, -0.166666666666667}, {0.471404520791032, 0.288675134594813, -0.166666666666667}, {0.471404520791032, 0, 0.333333333333333}}; // Interesting (new) part // Tables of derivatives of the polynomial base (transpose) const static double dmats0[3][3] = \ {{0, 0, 0}, {4.89897948556636, 0, 0}, {0, 0, 0}}; const static double dmats1[3][3] = \ {{0, 0, 0}, {2.44948974278318, 0, 0}, {4.24264068711928, 0, 0}}; // Compute reference derivatives // Declare pointer to array of derivatives on FIAT element double *derivatives = new double [num_derivatives]; // Declare coefficients double coeff0_0 = 0; double coeff0_1 = 0; double coeff0_2 = 0; // Declare new coefficients double new_coeff0_0 = 0; double new_coeff0_1 = 0; double new_coeff0_2 = 0; // Loop possible derivatives 
for (unsigned int deriv_num = 0; deriv_num < num_derivatives; deriv_num++) { // Get values from coefficients array new_coeff0_0 = coefficients0[dof][0]; new_coeff0_1 = coefficients0[dof][1]; new_coeff0_2 = coefficients0[dof][2]; // Loop derivative order for (unsigned int j = 0; j < n; j++) { // Update old coefficients coeff0_0 = new_coeff0_0; coeff0_1 = new_coeff0_1; coeff0_2 = new_coeff0_2; if(combinations[deriv_num][j] == 0) { new_coeff0_0 = coeff0_0*dmats0[0][0] + coeff0_1*dmats0[1][0] + coeff0_2*dmats0[2][0]; new_coeff0_1 = coeff0_0*dmats0[0][1] + coeff0_1*dmats0[1][1] + coeff0_2*dmats0[2][1]; new_coeff0_2 = coeff0_0*dmats0[0][2] + coeff0_1*dmats0[1][2] + coeff0_2*dmats0[2][2]; } if(combinations[deriv_num][j] == 1) { new_coeff0_0 = coeff0_0*dmats1[0][0] + coeff0_1*dmats1[1][0] + coeff0_2*dmats1[2][0]; new_coeff0_1 = coeff0_0*dmats1[0][1] + coeff0_1*dmats1[1][1] + coeff0_2*dmats1[2][1]; new_coeff0_2 = coeff0_0*dmats1[0][2] + coeff0_1*dmats1[1][2] + coeff0_2*dmats1[2][2]; } } // Compute derivatives on reference element as dot product of coefficients and basisvalues derivatives[deriv_num] = new_coeff0_0*basisvalue0 + new_coeff0_1*basisvalue1 + new_coeff0_2*basisvalue2; } // Transform derivatives back to physical element for (unsigned int row = 0; row < num_derivatives; row++) { for (unsigned int col = 0; col < num_derivatives; col++) { values[row] += transform[row][col]*derivatives[col]; } } // Delete pointer to array of derivatives on FIAT element delete [] derivatives; // Delete pointer to array of combinations of derivatives and transform for (unsigned int row = 0; row < num_derivatives; row++) { delete [] combinations[row]; delete [] transform[row]; } delete [] combinations; delete [] transform; } /// Evaluate order n derivatives of all basis functions at given point in cell virtual void evaluate_basis_derivatives_all(unsigned int n, double* values, const double* coordinates, const ufc::cell& c) const { throw std::runtime_error("The vectorised version of 
evaluate_basis_derivatives() is not yet implemented."); } /// Evaluate linear functional for dof i on the function f virtual double evaluate_dof(unsigned int i, const ufc::function& f, const ufc::cell& c) const { // The reference points, direction and weights: const static double X[3][1][2] = {{{0, 0}}, {{1, 0}}, {{0, 1}}}; const static double W[3][1] = {{1}, {1}, {1}}; const static double D[3][1][1] = {{{1}}, {{1}}, {{1}}}; const double * const * x = c.coordinates; double result = 0.0; // Iterate over the points: // Evaluate basis functions for affine mapping const double w0 = 1.0 - X[i][0][0] - X[i][0][1]; const double w1 = X[i][0][0]; const double w2 = X[i][0][1]; // Compute affine mapping y = F(X) double y[2]; y[0] = w0*x[0][0] + w1*x[1][0] + w2*x[2][0]; y[1] = w0*x[0][1] + w1*x[1][1] + w2*x[2][1]; // Evaluate function at physical points double values[1]; f.evaluate(values, y, c); // Map function values using appropriate mapping // Affine map: Do nothing // Note that we do not map the weights (yet). 
// Take directional components for(int k = 0; k < 1; k++) result += values[k]*D[i][0][k]; // Multiply by weights result *= W[i][0]; return result; } /// Evaluate linear functionals for all dofs on the function f virtual void evaluate_dofs(double* values, const ufc::function& f, const ufc::cell& c) const { throw std::runtime_error("Not implemented (introduced in UFC v1.1)."); } /// Interpolate vertex values from dof values virtual void interpolate_vertex_values(double* vertex_values, const double* dof_values, const ufc::cell& c) const { // Evaluate at vertices and use affine mapping vertex_values[0] = dof_values[0]; vertex_values[1] = dof_values[1]; vertex_values[2] = dof_values[2]; } /// Return the number of sub elements (for a mixed element) virtual unsigned int num_sub_elements() const { return 1; } /// Create a new finite element for sub element i (for a mixed element) virtual ufc::finite_element* create_sub_element(unsigned int i) const { return new PoissonBilinearForm_finite_element_1(); } }; /// This class defines the interface for a local-to-global mapping of /// degrees of freedom (dofs). 
class PoissonBilinearForm_dof_map_0: public ufc::dof_map { private: unsigned int __global_dimension; public: /// Constructor PoissonBilinearForm_dof_map_0() : ufc::dof_map() { __global_dimension = 0; } /// Destructor virtual ~PoissonBilinearForm_dof_map_0() { // Do nothing } /// Return a string identifying the dof map virtual const char* signature() const { return "FFC dof map for FiniteElement('Lagrange', 'triangle', 1)"; } /// Return true iff mesh entities of topological dimension d are needed virtual bool needs_mesh_entities(unsigned int d) const { switch (d) { case 0: return true; break; case 1: return false; break; case 2: return false; break; } return false; } /// Initialize dof map for mesh (return true iff init_cell() is needed) virtual bool init_mesh(const ufc::mesh& m) { __global_dimension = m.num_entities[0]; return false; } /// Initialize dof map for given cell virtual void init_cell(const ufc::mesh& m, const ufc::cell& c) { // Do nothing } /// Finish initialization of dof map for cells virtual void init_cell_finalize() { // Do nothing } /// Return the dimension of the global finite element function space virtual unsigned int global_dimension() const { return __global_dimension; } /// Return the dimension of the local finite element function space virtual unsigned int local_dimension() const { return 3; } // Return the geometric dimension of the coordinates this dof map provides virtual unsigned int geometric_dimension() const { return 2; } /// Return the number of dofs on each cell facet virtual unsigned int num_facet_dofs() const { return 2; } /// Return the number of dofs associated with each cell entity of dimension d virtual unsigned int num_entity_dofs(unsigned int d) const { throw std::runtime_error("Not implemented (introduced in UFC v1.1)."); } /// Tabulate the local-to-global mapping of dofs on a cell virtual void tabulate_dofs(unsigned int* dofs, const ufc::mesh& m, const ufc::cell& c) const { dofs[0] = c.entity_indices[0][0]; dofs[1] = 
c.entity_indices[0][1]; dofs[2] = c.entity_indices[0][2]; } /// Tabulate the local-to-local mapping from facet dofs to cell dofs virtual void tabulate_facet_dofs(unsigned int* dofs, unsigned int facet) const { switch (facet) { case 0: dofs[0] = 1; dofs[1] = 2; break; case 1: dofs[0] = 0; dofs[1] = 2; break; case 2: dofs[0] = 0; dofs[1] = 1; break; } } /// Tabulate the local-to-local mapping of dofs on entity (d, i) virtual void tabulate_entity_dofs(unsigned int* dofs, unsigned int d, unsigned int i) const { throw std::runtime_error("Not implemented (introduced in UFC v1.1)."); } /// Tabulate the coordinates of all dofs on a cell virtual void tabulate_coordinates(double** coordinates, const ufc::cell& c) const { const double * const * x = c.coordinates; coordinates[0][0] = x[0][0]; coordinates[0][1] = x[0][1]; coordinates[1][0] = x[1][0]; coordinates[1][1] = x[1][1]; coordinates[2][0] = x[2][0]; coordinates[2][1] = x[2][1]; } /// Return the number of sub dof maps (for a mixed element) virtual unsigned int num_sub_dof_maps() const { return 1; } /// Create a new dof_map for sub dof map i (for a mixed element) virtual ufc::dof_map* create_sub_dof_map(unsigned int i) const { return new PoissonBilinearForm_dof_map_0(); } }; /// This class defines the interface for a local-to-global mapping of /// degrees of freedom (dofs). 
class PoissonBilinearForm_dof_map_1: public ufc::dof_map { private: unsigned int __global_dimension; public: /// Constructor PoissonBilinearForm_dof_map_1() : ufc::dof_map() { __global_dimension = 0; } /// Destructor virtual ~PoissonBilinearForm_dof_map_1() { // Do nothing } /// Return a string identifying the dof map virtual const char* signature() const { return "FFC dof map for FiniteElement('Lagrange', 'triangle', 1)"; } /// Return true iff mesh entities of topological dimension d are needed virtual bool needs_mesh_entities(unsigned int d) const { switch (d) { case 0: return true; break; case 1: return false; break; case 2: return false; break; } return false; } /// Initialize dof map for mesh (return true iff init_cell() is needed) virtual bool init_mesh(const ufc::mesh& m) { __global_dimension = m.num_entities[0]; return false; } /// Initialize dof map for given cell virtual void init_cell(const ufc::mesh& m, const ufc::cell& c) { // Do nothing } /// Finish initialization of dof map for cells virtual void init_cell_finalize() { // Do nothing } /// Return the dimension of the global finite element function space virtual unsigned int global_dimension() const { return __global_dimension; } /// Return the dimension of the local finite element function space virtual unsigned int local_dimension() const { return 3; } // Return the geometric dimension of the coordinates this dof map provides virtual unsigned int geometric_dimension() const { return 2; } /// Return the number of dofs on each cell facet virtual unsigned int num_facet_dofs() const { return 2; } /// Return the number of dofs associated with each cell entity of dimension d virtual unsigned int num_entity_dofs(unsigned int d) const { throw std::runtime_error("Not implemented (introduced in UFC v1.1)."); } /// Tabulate the local-to-global mapping of dofs on a cell virtual void tabulate_dofs(unsigned int* dofs, const ufc::mesh& m, const ufc::cell& c) const { dofs[0] = c.entity_indices[0][0]; dofs[1] = 
c.entity_indices[0][1]; dofs[2] = c.entity_indices[0][2]; } /// Tabulate the local-to-local mapping from facet dofs to cell dofs virtual void tabulate_facet_dofs(unsigned int* dofs, unsigned int facet) const { switch (facet) { case 0: dofs[0] = 1; dofs[1] = 2; break; case 1: dofs[0] = 0; dofs[1] = 2; break; case 2: dofs[0] = 0; dofs[1] = 1; break; } } /// Tabulate the local-to-local mapping of dofs on entity (d, i) virtual void tabulate_entity_dofs(unsigned int* dofs, unsigned int d, unsigned int i) const { throw std::runtime_error("Not implemented (introduced in UFC v1.1)."); } /// Tabulate the coordinates of all dofs on a cell virtual void tabulate_coordinates(double** coordinates, const ufc::cell& c) const { const double * const * x = c.coordinates; coordinates[0][0] = x[0][0]; coordinates[0][1] = x[0][1]; coordinates[1][0] = x[1][0]; coordinates[1][1] = x[1][1]; coordinates[2][0] = x[2][0]; coordinates[2][1] = x[2][1]; } /// Return the number of sub dof maps (for a mixed element) virtual unsigned int num_sub_dof_maps() const { return 1; } /// Create a new dof_map for sub dof map i (for a mixed element) virtual ufc::dof_map* create_sub_dof_map(unsigned int i) const { return new PoissonBilinearForm_dof_map_1(); } }; /// This class defines the interface for the tabulation of the cell /// tensor corresponding to the local contribution to a form from /// the integral over a cell. 
class PoissonBilinearForm_cell_integral_0_quadrature: public ufc::cell_integral { public: /// Constructor PoissonBilinearForm_cell_integral_0_quadrature() : ufc::cell_integral() { // Do nothing } /// Destructor virtual ~PoissonBilinearForm_cell_integral_0_quadrature() { // Do nothing } /// Tabulate the tensor for the contribution from a local cell virtual void tabulate_tensor(double* A, const double * const * w, const ufc::cell& c) const { // Extract vertex coordinates const double * const * x = c.coordinates; // Compute Jacobian of affine map from reference cell const double J_00 = x[1][0] - x[0][0]; const double J_01 = x[2][0] - x[0][0]; const double J_10 = x[1][1] - x[0][1]; const double J_11 = x[2][1] - x[0][1]; // Compute determinant of Jacobian double detJ = J_00*J_11 - J_01*J_10; // Compute inverse of Jacobian const double Jinv_00 = J_11 / detJ; const double Jinv_01 = -J_01 / detJ; const double Jinv_10 = -J_10 / detJ; const double Jinv_11 = J_00 / detJ; // Set scale factor const double det = std::abs(detJ); // Array of quadrature weights const static double W1 = 0.5; const static double FE0_D10[1][3] = \ {{-1, 1, 0}}; const static double FE0_D01[1][3] = \ {{-1, 0, 1}}; // Compute element tensor using UFL quadrature representation // Optimisations: ('simplify expressions', False), ('ignore zero tables', False), ('non zero columns', False), ('remove zero terms', False), ('ignore ones', False) // Total number of operations to compute element tensor: 162 // Loop quadrature points for integral // Number of operations to compute element tensor for following IP loop = 162 // Only 1 integration point, omitting IP loop. 
// Number of operations for primary indices = 162 for (unsigned int j = 0; j < 3; j++) { for (unsigned int k = 0; k < 3; k++) { // Number of operations to compute entry = 18 A[j*3 + k] += ((Jinv_00*FE0_D10[0][j] + Jinv_10*FE0_D01[0][j])*(Jinv_00*FE0_D10[0][k] + Jinv_10*FE0_D01[0][k]) + (Jinv_01*FE0_D10[0][j] + Jinv_11*FE0_D01[0][j])*(Jinv_01*FE0_D10[0][k] + Jinv_11*FE0_D01[0][k]))*W1*det; }// end loop over 'k' }// end loop over 'j' } }; /// This class defines the interface for the tabulation of the cell /// tensor corresponding to the local contribution to a form from /// the integral over a cell. class PoissonBilinearForm_cell_integral_0: public ufc::cell_integral { private: PoissonBilinearForm_cell_integral_0_quadrature integral_0_quadrature; public: /// Constructor PoissonBilinearForm_cell_integral_0() : ufc::cell_integral() { // Do nothing } /// Destructor virtual ~PoissonBilinearForm_cell_integral_0() { // Do nothing } /// Tabulate the tensor for the contribution from a local cell virtual void tabulate_tensor(double* A, const double * const * w, const ufc::cell& c) const { // Reset values of the element tensor block A[0] = 0; A[1] = 0; A[2] = 0; A[3] = 0; A[4] = 0; A[5] = 0; A[6] = 0; A[7] = 0; A[8] = 0; // Add all contributions to element tensor integral_0_quadrature.tabulate_tensor(A, w, c); } }; /// This class defines the interface for the assembly of the global /// tensor corresponding to a form with r + n arguments, that is, a /// mapping /// /// a : V1 x V2 x ... Vr x W1 x W2 x ... x Wn -> R /// /// with arguments v1, v2, ..., vr, w1, w2, ..., wn. The rank r /// global tensor A is defined by /// /// A = a(V1, V2, ..., Vr, w1, w2, ..., wn), /// /// where each argument Vj represents the application to the /// sequence of basis functions of Vj and w1, w2, ..., wn are given /// fixed functions (coefficients). 
class PoissonBilinearForm: public ufc::form { public: /// Constructor PoissonBilinearForm() : ufc::form() { // Do nothing } /// Destructor virtual ~PoissonBilinearForm() { // Do nothing } /// Return a string identifying the form virtual const char* signature() const { return "Form([Integral(IndexSum(Product(Indexed(ComponentTensor(SpatialDerivative(BasisFunction(FiniteElement('Lagrange', Cell('triangle', 1), 1), 0), MultiIndex((Index(0),), {Index(0): 2})), MultiIndex((Index(0),), {Index(0): 2})), MultiIndex((Index(1),), {Index(1): 2})), Indexed(ComponentTensor(SpatialDerivative(BasisFunction(FiniteElement('Lagrange', Cell('triangle', 1), 1), 1), MultiIndex((Index(2),), {Index(2): 2})), MultiIndex((Index(2),), {Index(2): 2})), MultiIndex((Index(1),), {Index(1): 2}))), MultiIndex((Index(1),), {Index(1): 2})), Measure('cell', 0, None))])"; } /// Return the rank of the global tensor (r) virtual unsigned int rank() const { return 2; } /// Return the number of coefficients (n) virtual unsigned int num_coefficients() const { return 0; } /// Return the number of cell integrals virtual unsigned int num_cell_integrals() const { return 1; } /// Return the number of exterior facet integrals virtual unsigned int num_exterior_facet_integrals() const { return 0; } /// Return the number of interior facet integrals virtual unsigned int num_interior_facet_integrals() const { return 0; } /// Create a new finite element for argument function i virtual ufc::finite_element* create_finite_element(unsigned int i) const { switch (i) { case 0: return new PoissonBilinearForm_finite_element_0(); break; case 1: return new PoissonBilinearForm_finite_element_1(); break; } return 0; } /// Create a new dof map for argument function i virtual ufc::dof_map* create_dof_map(unsigned int i) const { switch (i) { case 0: return new PoissonBilinearForm_dof_map_0(); break; case 1: return new PoissonBilinearForm_dof_map_1(); break; } return 0; } /// Create a new cell integral on sub domain i virtual 
ufc::cell_integral* create_cell_integral(unsigned int i) const { return new PoissonBilinearForm_cell_integral_0(); } /// Create a new exterior facet integral on sub domain i virtual ufc::exterior_facet_integral* create_exterior_facet_integral(unsigned int i) const { return 0; } /// Create a new interior facet integral on sub domain i virtual ufc::interior_facet_integral* create_interior_facet_integral(unsigned int i) const { return 0; } }; #endif ffc-1.6.0/ufc-merge-into-ffc/doc/manual/code/poisson_syfi.h000066400000000000000000000000241255571034100234610ustar00rootroot00000000000000// Add example here ffc-1.6.0/ufc-merge-into-ffc/doc/manual/code/stiffness_syfi/000077500000000000000000000000001255571034100236265ustar00rootroot00000000000000ffc-1.6.0/ufc-merge-into-ffc/doc/manual/code/stiffness_syfi/dof_map_Lagrange_1_2D.cpp000066400000000000000000000112521255571034100303050ustar00rootroot00000000000000// // This code complies with UFC version 1.0, and is generated with SyFi version 0.4.0. 
// // http://www.fenics.org/syfi/ // http://www.fenics.org/ufc/ // #include #include #include #include #include #include #include #include #include "dof_map_Lagrange_1_2D.h" namespace pycc { /// Constructor dof_map_Lagrange_1_2D::dof_map_Lagrange_1_2D() : ufc::dof_map() { loc2glob = 0; } /// Destructor dof_map_Lagrange_1_2D::~dof_map_Lagrange_1_2D() { if(loc2glob) delete [] loc2glob; } /// Return a string identifying the dof map const char* dof_map_Lagrange_1_2D::signature() const { return "dof_map_Lagrange_1_2D // generated by SyFi"; } /// Return true iff mesh entities of topological dimension d are needed bool dof_map_Lagrange_1_2D::needs_mesh_entities(unsigned int d) const { switch(d) { case 0: return true; // vertices case 1: return true; // edges case 2: return true; // faces case 3: return false; // volumes } return false; // strange unsupported case or error } /// Initialize dof map for mesh (return true iff init_cell() is needed) bool dof_map_Lagrange_1_2D::init_mesh(const ufc::mesh& m) { int top_dim = 2; num_elements = m.num_entities[top_dim]; return true; } /// Initialize dof map for given cell void dof_map_Lagrange_1_2D::init_cell(const ufc::mesh& m, const ufc::cell& c) { // coordinates double x0 = c.coordinates[0][0]; double y0 = c.coordinates[0][1]; double x1 = c.coordinates[1][0]; double y1 = c.coordinates[1][1]; double x2 = c.coordinates[2][0]; double y2 = c.coordinates[2][1]; // affine map double G00 = x1 - x0; double G01 = x2 - x0; double G10 = y1 - y0; double G11 = y2 - y0; int element = c.entity_indices[2][0]; double dof0[2] = { x0, y0 }; Ptv pdof0(2, dof0); dof.insert_dof(element, 0, pdof0); double dof1[2] = { G00+x0, y0+G10 }; Ptv pdof1(2, dof1); dof.insert_dof(element, 1, pdof1); double dof2[2] = { x0+G01, G11+y0 }; Ptv pdof2(2, dof2); dof.insert_dof(element, 2, pdof2); } /// Finish initialization of dof map for cells void dof_map_Lagrange_1_2D::init_cell_finalize() { loc2glob = new unsigned int[num_elements * local_dimension()]; 
dof.build_loc2dof(num_elements, local_dimension(), reinterpret_cast(loc2glob)); } /// Return the dimension of the global finite element function space unsigned int dof_map_Lagrange_1_2D::global_dimension() const { return dof.size(); } /// Return the dimension of the local finite element function space unsigned int dof_map_Lagrange_1_2D::local_dimension() const { return 3; } /// Return the number of dofs on each cell facet unsigned int dof_map_Lagrange_1_2D::num_facet_dofs() const { return 2; } /// Tabulate the local-to-global mapping of dofs on a cell void dof_map_Lagrange_1_2D::tabulate_dofs(unsigned int* dofs, const ufc::mesh& m, const ufc::cell& c) const { const unsigned int *from_dofs = loc2glob + (3 * c.entity_indices[2][0]); memcpy(dofs, from_dofs, sizeof(unsigned int)*3); } /// Tabulate the local-to-local mapping from facet dofs to cell dofs void dof_map_Lagrange_1_2D::tabulate_facet_dofs(unsigned int* dofs, unsigned int facet) const { switch(facet) { case 0: dofs[0] = 1; dofs[1] = 2; break; case 1: dofs[0] = 0; dofs[1] = 2; break; case 2: dofs[0] = 0; dofs[1] = 1; break; default: throw std::runtime_error("Invalid facet number."); } } /// Tabulate the coordinates of all dofs on a cell void dof_map_Lagrange_1_2D::tabulate_coordinates(double** coordinates, const ufc::cell& c) const { // coordinates double x0 = c.coordinates[0][0]; double y0 = c.coordinates[0][1]; double x1 = c.coordinates[1][0]; double y1 = c.coordinates[1][1]; double x2 = c.coordinates[2][0]; double y2 = c.coordinates[2][1]; // affine map double G00 = x1 - x0; double G01 = x2 - x0; double G10 = y1 - y0; double G11 = y2 - y0; coordinates[0][0] = x0; coordinates[0][1] = y0; coordinates[1][0] = G00+x0; coordinates[1][1] = y0+G10; coordinates[2][0] = x0+G01; coordinates[2][1] = G11+y0; } /// Return the number of sub dof maps (for a mixed element) unsigned int dof_map_Lagrange_1_2D::num_sub_dof_maps() const { return 1; } /// Create a new dof_map for sub dof map i (for a mixed element) 
ufc::dof_map* dof_map_Lagrange_1_2D::create_sub_dof_map(unsigned int i) const { return new dof_map_Lagrange_1_2D(); } } // namespace ffc-1.6.0/ufc-merge-into-ffc/doc/manual/code/stiffness_syfi/dof_map_Lagrange_1_2D.h000066400000000000000000000050571255571034100277600ustar00rootroot00000000000000// // This code complies with UFC version 1.0, and is generated with SyFi version 0.4.0. // // http://www.fenics.org/syfi/ // http://www.fenics.org/ufc/ // #ifndef __dof_map_Lagrange_1_2D_H #define __dof_map_Lagrange_1_2D_H #include #include #include #include #include #include #include #include namespace pycc { /// This class defines the interface for a local-to-global mapping of /// degrees of freedom (dofs). class dof_map_Lagrange_1_2D: public ufc::dof_map { public: pycc::Dof_Ptv dof; unsigned int num_elements; unsigned int * loc2glob; public: /// Constructor dof_map_Lagrange_1_2D(); /// Destructor virtual ~dof_map_Lagrange_1_2D(); /// Return a string identifying the dof map virtual const char* signature() const; /// Return true iff mesh entities of topological dimension d are needed virtual bool needs_mesh_entities(unsigned int d) const; /// Initialize dof map for mesh (return true iff init_cell() is needed) virtual bool init_mesh(const ufc::mesh& m); /// Initialize dof map for given cell virtual void init_cell(const ufc::mesh& m, const ufc::cell& c); /// Finish initialization of dof map for cells virtual void init_cell_finalize(); /// Return the dimension of the global finite element function space virtual unsigned int global_dimension() const; /// Return the dimension of the local finite element function space virtual unsigned int local_dimension() const; /// Return the number of dofs on each cell facet virtual unsigned int num_facet_dofs() const; /// Tabulate the local-to-global mapping of dofs on a cell virtual void tabulate_dofs(unsigned int* dofs, const ufc::mesh& m, const ufc::cell& c) const; /// Tabulate the local-to-local mapping from facet dofs to cell dofs 
virtual void tabulate_facet_dofs(unsigned int* dofs, unsigned int facet) const; /// Tabulate the coordinates of all dofs on a cell virtual void tabulate_coordinates(double** coordinates, const ufc::cell& c) const; /// Return the number of sub dof maps (for a mixed element) virtual unsigned int num_sub_dof_maps() const; /// Create a new dof_map for sub dof map i (for a mixed element) virtual ufc::dof_map* create_sub_dof_map(unsigned int i) const; }; } // namespace #endif ffc-1.6.0/ufc-merge-into-ffc/doc/manual/code/stiffness_syfi/fe_Lagrange_1_2D.cpp000066400000000000000000000073411255571034100272760ustar00rootroot00000000000000// // This code complies with UFC version 1.0, and is generated with SyFi version 0.4.0. // // http://www.fenics.org/syfi/ // http://www.fenics.org/ufc/ // #include #include #include #include #include #include #include #include #include "fe_Lagrange_1_2D.h" namespace pycc { /// Constructor fe_Lagrange_1_2D::fe_Lagrange_1_2D() : ufc::finite_element() { } /// Destructor fe_Lagrange_1_2D::~fe_Lagrange_1_2D() { } /// Return a string identifying the finite element const char* fe_Lagrange_1_2D::signature() const { return "fe_Lagrange_1_2D // generated by SyFi"; } /// Return the cell shape ufc::shape fe_Lagrange_1_2D::cell_shape() const { return ufc::triangle; } /// Return the dimension of the finite element function space unsigned int fe_Lagrange_1_2D::space_dimension() const { return 3; } /// Return the rank of the value space unsigned int fe_Lagrange_1_2D::value_rank() const { return 0; } /// Return the dimension of the value space for axis i unsigned int fe_Lagrange_1_2D::value_dimension(unsigned int i) const { return 1; } /// Evaluate basis function i at given point in cell void fe_Lagrange_1_2D::evaluate_basis(unsigned int i, double* values, const double* coordinates, const ufc::cell& c) const { const double x = coordinates[0]; const double y = coordinates[1]; switch(i) { case 0: values[0] = -x-y+1.0; break; case 1: values[0] = x; break; case 
2: values[0] = y; break; } } /// Evaluate order n derivatives of basis function i at given point in cell void fe_Lagrange_1_2D::evaluate_basis_derivatives(unsigned int i, unsigned int n, double* values, const double* coordinates, const ufc::cell& c) const { throw std::runtime_error("gen_evaluate_basis_derivatives not implemented yet."); } /// Evaluate linear functional for dof i on the function f double fe_Lagrange_1_2D::evaluate_dof(unsigned int i, const ufc::function& f, const ufc::cell& c) const { // coordinates double x0 = c.coordinates[0][0]; double y0 = c.coordinates[0][1]; double x1 = c.coordinates[1][0]; double y1 = c.coordinates[1][1]; double x2 = c.coordinates[2][0]; double y2 = c.coordinates[2][1]; // affine map double G00 = x1 - x0; double G01 = x2 - x0; double G10 = y1 - y0; double G11 = y2 - y0; double v[1]; double x[2]; switch(i) { case 0: x[0] = x0; x[1] = y0; break; case 1: x[0] = x0+G00; x[1] = G10+y0; break; case 2: x[0] = G01+x0; x[1] = y0+G11; break; } f.evaluate(v, x, c); return v[i % 1]; } /// Interpolate vertex values from dof values void fe_Lagrange_1_2D::interpolate_vertex_values(double* vertex_values, const double* dof_values, const ufc::cell& c) const { vertex_values[0] = dof_values[0]; vertex_values[1] = dof_values[1]; vertex_values[2] = dof_values[2]; } /// Return the number of sub elements (for a mixed element) unsigned int fe_Lagrange_1_2D::num_sub_elements() const { return 1; } /// Create a new finite element for sub element i (for a mixed element) ufc::finite_element* fe_Lagrange_1_2D::create_sub_element(unsigned int i) const { return new fe_Lagrange_1_2D(); } } // namespace ffc-1.6.0/ufc-merge-into-ffc/doc/manual/code/stiffness_syfi/fe_Lagrange_1_2D.h000066400000000000000000000050411255571034100267360ustar00rootroot00000000000000// // This code complies with UFC version 1.0, and is generated with SyFi version 0.4.0. 
// // http://www.fenics.org/syfi/ // http://www.fenics.org/ufc/ // #ifndef __fe_Lagrange_1_2D_H #define __fe_Lagrange_1_2D_H #include #include #include #include #include #include #include #include namespace pycc { /// This class defines the interface for a finite element. class fe_Lagrange_1_2D: public ufc::finite_element { public: /// Constructor fe_Lagrange_1_2D(); /// Destructor virtual ~fe_Lagrange_1_2D(); /// Return a string identifying the finite element virtual const char* signature() const; /// Return the cell shape virtual ufc::shape cell_shape() const; /// Return the dimension of the finite element function space virtual unsigned int space_dimension() const; /// Return the rank of the value space virtual unsigned int value_rank() const; /// Return the dimension of the value space for axis i virtual unsigned int value_dimension(unsigned int i) const; /// Evaluate basis function i at given point in cell virtual void evaluate_basis(unsigned int i, double* values, const double* coordinates, const ufc::cell& c) const; /// Evaluate order n derivatives of basis function i at given point in cell virtual void evaluate_basis_derivatives(unsigned int i, unsigned int n, double* values, const double* coordinates, const ufc::cell& c) const; /// Evaluate linear functional for dof i on the function f virtual double evaluate_dof(unsigned int i, const ufc::function& f, const ufc::cell& c) const; /// Interpolate vertex values from dof values virtual void interpolate_vertex_values(double* vertex_values, const double* dof_values, const ufc::cell& c) const; /// Return the number of sub elements (for a mixed element) virtual unsigned int num_sub_elements() const; /// Create a new finite element for sub element i (for a mixed element) virtual ufc::finite_element* create_sub_element(unsigned int i) const; }; } // namespace #endif 
ffc-1.6.0/ufc-merge-into-ffc/doc/manual/code/stiffness_syfi/form__stiffness_form__Lagrange_1_2D.cpp000066400000000000000000000126361255571034100332570ustar00rootroot00000000000000// // This code complies with UFC version 1.0, and is generated with SyFi version 0.4.0. // // http://www.fenics.org/syfi/ // http://www.fenics.org/ufc/ // #include #include #include #include #include #include #include #include #include "dof_map_Lagrange_1_2D.h" #include "fe_Lagrange_1_2D.h" #include "form__stiffness_form__Lagrange_1_2D.h" namespace pycc { /// This class defines the interface for the tabulation of the cell /// tensor corresponding to the local contribution to a form from /// the integral over a cell. class cell_itg__stiffness_form__Lagrange_1_2D: public ufc::cell_integral { public: /// Constructor cell_itg__stiffness_form__Lagrange_1_2D(); /// Destructor virtual ~cell_itg__stiffness_form__Lagrange_1_2D(); /// Tabulate the tensor for the contribution from a local cell virtual void tabulate_tensor(double* A, const double * const * w, const ufc::cell& c) const; }; /// Constructor cell_itg__stiffness_form__Lagrange_1_2D::cell_itg__stiffness_form__Lagrange_1_2D() : ufc::cell_integral() { } /// Destructor cell_itg__stiffness_form__Lagrange_1_2D::~cell_itg__stiffness_form__Lagrange_1_2D() { } /// Tabulate the tensor for the contribution from a local cell void cell_itg__stiffness_form__Lagrange_1_2D::tabulate_tensor(double* A, const double * const * w, const ufc::cell& c) const { // coordinates double x0 = c.coordinates[0][0]; double y0 = c.coordinates[0][1]; double x1 = c.coordinates[1][0]; double y1 = c.coordinates[1][1]; double x2 = c.coordinates[2][0]; double y2 = c.coordinates[2][1]; // affine map double G00 = x1 - x0; double G01 = x2 - x0; double G10 = y1 - y0; double G11 = y2 - y0; double detG_tmp = G00*G11-G01*G10; double detG = fabs(detG_tmp); double GinvT00 = G11 / detG_tmp; double GinvT01 = -G10 / detG_tmp; double GinvT10 = -G01 / detG_tmp; double GinvT11 = G00 / 
detG_tmp; memset(A, 0, sizeof(double)*9); A[3*0 + 0] = detG*((GinvT01*GinvT01)/2.0+(GinvT11*GinvT11)/2.0+GinvT10*GinvT00 +GinvT01*GinvT11+(GinvT00*GinvT00)/2.0+(GinvT10*GinvT10)/2.0); A[3*0 + 1] = detG*(-(GinvT01*GinvT01)/2.0-GinvT10*GinvT00/2.0 -GinvT01*GinvT11/2.0-(GinvT00*GinvT00)/2.0); A[3*0 + 2] = detG*(-(GinvT11*GinvT11)/2.0-GinvT10*GinvT00/2.0 -GinvT01*GinvT11/2.0-(GinvT10*GinvT10)/2.0); A[3*1 + 0] = detG*(-(GinvT01*GinvT01)/2.0-GinvT10*GinvT00/2.0 -GinvT01*GinvT11/2.0-(GinvT00*GinvT00)/2.0); A[3*1 + 1] = detG*((GinvT01*GinvT01)/2.0+(GinvT00*GinvT00)/2.0); A[3*1 + 2] = detG*(GinvT10*GinvT00/2.0+GinvT01*GinvT11/2.0); A[3*2 + 0] = detG*(-(GinvT11*GinvT11)/2.0-GinvT10*GinvT00/2.0 -GinvT01*GinvT11/2.0-(GinvT10*GinvT10)/2.0); A[3*2 + 1] = detG*(GinvT10*GinvT00/2.0+GinvT01*GinvT11/2.0); A[3*2 + 2] = detG*((GinvT11*GinvT11)/2.0+(GinvT10*GinvT10)/2.0); } /// Constructor form__stiffness_form__Lagrange_1_2D::form__stiffness_form__Lagrange_1_2D() : ufc::form() { } /// Destructor form__stiffness_form__Lagrange_1_2D::~form__stiffness_form__Lagrange_1_2D() { } /// Return a string identifying the form const char* form__stiffness_form__Lagrange_1_2D::signature() const { return "form__stiffness_form__Lagrange_1_2D // generated by SyFi"; } /// Return the rank of the global tensor (r) unsigned int form__stiffness_form__Lagrange_1_2D::rank() const { return 2; } /// Return the number of coefficients (n) unsigned int form__stiffness_form__Lagrange_1_2D::num_coefficients() const { return 0; } /// Return the number of cell integrals unsigned int form__stiffness_form__Lagrange_1_2D::num_cell_integrals() const { return 1; } /// Return the number of exterior facet integrals unsigned int form__stiffness_form__Lagrange_1_2D::num_exterior_facet_integrals() const { return 0; } /// Return the number of interior facet integrals unsigned int form__stiffness_form__Lagrange_1_2D::num_interior_facet_integrals() const { return 0; } /// Create a new finite element for argument function i 
ufc::finite_element* form__stiffness_form__Lagrange_1_2D:: create_finite_element(unsigned int i) const { switch(i) { case 0: return new fe_Lagrange_1_2D(); case 1: return new fe_Lagrange_1_2D(); } throw std::runtime_error("Invalid index in create_finite_element()"); } /// Create a new dof map for argument function i ufc::dof_map* form__stiffness_form__Lagrange_1_2D::create_dof_map(unsigned int i) const { switch(i) { case 0: return new dof_map_Lagrange_1_2D(); case 1: return new dof_map_Lagrange_1_2D(); } throw std::runtime_error("Invalid index in create_dof_map()"); } /// Create a new cell integral on sub domain i ufc::cell_integral* form__stiffness_form__Lagrange_1_2D:: create_cell_integral(unsigned int i) const { return new cell_itg__stiffness_form__Lagrange_1_2D(); } /// Create a new exterior facet integral on sub domain i ufc::exterior_facet_integral* form__stiffness_form__Lagrange_1_2D:: create_exterior_facet_integral(unsigned int i) const { return 0; } /// Create a new interior facet integral on sub domain i ufc::interior_facet_integral* form__stiffness_form__Lagrange_1_2D:: create_interior_facet_integral(unsigned int i) const { return 0; } } // namespace ffc-1.6.0/ufc-merge-into-ffc/doc/manual/code/stiffness_syfi/form__stiffness_form__Lagrange_1_2D.h000066400000000000000000000051101255571034100327110ustar00rootroot00000000000000// // This code complies with UFC version 1.0, and is generated with SyFi version 0.4.0. // // http://www.fenics.org/syfi/ // http://www.fenics.org/ufc/ // #ifndef __form__stiffness_form__Lagrange_1_2D_H #define __form__stiffness_form__Lagrange_1_2D_H #include #include #include #include #include #include #include #include #include "dof_map_Lagrange_1_2D.h" #include "fe_Lagrange_1_2D.h" namespace pycc { /// This class defines the interface for the assembly of the global /// tensor corresponding to a form with r + n arguments, that is, a /// mapping /// /// a : V1 x V2 x ... Vr x W1 x W2 x ... 
x Wn -> R /// /// with arguments v1, v2, ..., vr, w1, w2, ..., wn. The rank r /// global tensor A is defined by /// /// A = a(V1, V2, ..., Vr, w1, w2, ..., wn), /// /// where each argument Vj represents the application to the /// sequence of basis functions of Vj and w1, w2, ..., wn are given /// fixed functions (coefficients). class form__stiffness_form__Lagrange_1_2D: public ufc::form { public: /// Constructor form__stiffness_form__Lagrange_1_2D(); /// Destructor virtual ~form__stiffness_form__Lagrange_1_2D(); /// Return a string identifying the form virtual const char* signature() const; /// Return the rank of the global tensor (r) virtual unsigned int rank() const; /// Return the number of coefficients (n) virtual unsigned int num_coefficients() const; /// Return the number of cell integrals virtual unsigned int num_cell_integrals() const; /// Return the number of exterior facet integrals virtual unsigned int num_exterior_facet_integrals() const; /// Return the number of interior facet integrals virtual unsigned int num_interior_facet_integrals() const; /// Create a new finite element for argument function i virtual ufc::finite_element* create_finite_element(unsigned int i) const; /// Create a new dof map for argument function i virtual ufc::dof_map* create_dof_map(unsigned int i) const; /// Create a new cell integral on sub domain i virtual ufc::cell_integral* create_cell_integral(unsigned int i) const; /// Create a new exterior facet integral on sub domain i virtual ufc::exterior_facet_integral* create_exterior_facet_integral(unsigned int i) const; /// Create a new interior facet integral on sub domain i virtual ufc::interior_facet_integral* create_interior_facet_integral(unsigned int i) const; }; } // namespace #endif 
ffc-1.6.0/ufc-merge-into-ffc/doc/manual/eps/000077500000000000000000000000001255571034100204455ustar00rootroot00000000000000ffc-1.6.0/ufc-merge-into-ffc/doc/manual/eps/hexahedron.eps000066400000000000000000000073461255571034100233150ustar00rootroot00000000000000%!PS-Adobe-3.0 EPSF-3.0 %%Creator: inkscape 0.44 %%Pages: 1 %%Orientation: Portrait %%BoundingBox: 32 56 543 470 %%HiResBoundingBox: 32.571875 56 542.36875 469.99688 %%EndComments %%Page: 1 1 0 842 translate 0.8 -0.8 scale gsave [1 0 0 1 0 0] concat 0 0 0 setrgbcolor [] 0 setdash 3 setlinewidth 0 setlinejoin 0 setlinecap newpath 140 852.36218 moveto 492.81509 958.50976 lineto stroke gsave [-1.7236793 -0.51858435 0.51858435 -1.7236793 492.81509 958.50976] concat gsave 0 0 0 setrgbcolor newpath 8.7185878 4.0337352 moveto -2.2072895 0.016013256 lineto 8.7185884 -4.0017078 lineto 6.97309 -1.6296469 6.9831476 1.6157441 8.7185878 4.0337352 curveto closepath eofill grestore grestore gsave [1 0 0 -1 40 872.36218] concat gsave /newlatin1font {findfont dup length dict copy dup /Encoding ISOLatin1Encoding put definefont} def /BitstreamVeraSans-Roman-ISOLatin1 /BitstreamVeraSans-Roman newlatin1font 24 scalefont setfont 0 0 0 setrgbcolor newpath 0 0 moveto (v0) show grestore grestore gsave [1 0 0 -1 380 982.36218] concat gsave /BitstreamVeraSans-Roman-ISOLatin1 findfont 24 scalefont setfont 0 0 0 setrgbcolor newpath 0 0 moveto (v1) show grestore grestore gsave [1 0 0 -1 650 842.36218] concat gsave /BitstreamVeraSans-Roman-ISOLatin1 findfont 24 scalefont setfont 0 0 0 setrgbcolor newpath 0 0 moveto (v2) show grestore grestore gsave [1 0 0 -1 40 522.36218] concat gsave /BitstreamVeraSans-Roman-ISOLatin1 findfont 24 scalefont setfont 0 0 0 setrgbcolor newpath 0 0 moveto (v4) show grestore grestore gsave 0.38431373 0.52549022 0.92941177 setrgbcolor newpath 140 852.36218 moveto 440 942.36218 lineto 440 552.36218 lineto 140 522.36218 lineto 140 852.36218 lineto closepath eofill grestore gsave 0.27450982 0.41960785 0.83529413 
setrgbcolor newpath 440 942.36218 moveto 640 832.36218 lineto 640 522.36218 lineto 440 552.36218 lineto 440 942.36218 lineto closepath eofill grestore gsave 0.21568628 0.39607844 0.9137255 setrgbcolor newpath 140 522.36218 moveto 440 552.36218 lineto 640 522.36218 lineto 370 502.36218 lineto 140 522.36218 lineto closepath eofill grestore 0.29411766 0.29411766 0.58823532 setrgbcolor [18 6] 0 setdash 3 setlinewidth 0 setlinejoin 0 setlinecap newpath 370 502.36218 moveto 370 772.36218 lineto stroke 0.29411766 0.29411766 0.58823532 setrgbcolor [18 6] 0 setdash 3 setlinewidth 0 setlinejoin 0 setlinecap newpath 140 852.36218 moveto 370 772.36218 lineto 640 832.36218 lineto stroke 0 0 0 setrgbcolor [] 0 setdash 3 setlinewidth 0 setlinejoin 0 setlinecap newpath 140 852.36218 moveto 140 482.36218 lineto stroke gsave [-1.1021457e-16 1.8 -1.8 -1.1021457e-16 140 482.36218] concat gsave 0 0 0 setrgbcolor newpath 8.7185878 4.0337352 moveto -2.2072895 0.016013256 lineto 8.7185884 -4.0017078 lineto 6.97309 -1.6296469 6.9831476 1.6157441 8.7185878 4.0337352 curveto closepath eofill grestore grestore gsave [1 0 0 -1 650 512.36218] concat gsave /BitstreamVeraSans-Roman-ISOLatin1 findfont 24 scalefont setfont 0 0 0 setrgbcolor newpath 0 0 moveto (v6) show grestore grestore gsave [1 0 0 -1 330 482.36218] concat gsave /BitstreamVeraSans-Roman-ISOLatin1 findfont 24 scalefont setfont 0 0 0 setrgbcolor newpath 0 0 moveto (v7) show grestore grestore 0 0 0 setrgbcolor [] 0 setdash 3 setlinewidth 0 setlinejoin 0 setlinecap newpath 140 852.36218 moveto 440 942.36218 lineto 440 552.36218 lineto stroke 0 0 0 setrgbcolor [] 0 setdash 3 setlinewidth 0 setlinejoin 0 setlinecap newpath 140 522.36218 moveto 440 552.36218 lineto 640 522.36218 lineto stroke 0 0 0 setrgbcolor [] 0 setdash 3 setlinewidth 0 setlinejoin 0 setlinecap newpath 440 942.36218 moveto 640 832.36218 lineto 640 522.36218 lineto stroke 0 0 0 setrgbcolor [] 0 setdash 2.8 setlinewidth 0 setlinejoin 0 setlinecap newpath 140 522.36218 
moveto 370 502.36218 lineto 640 522.36218 lineto stroke grestore showpage %%EOF ffc-1.6.0/ufc-merge-into-ffc/doc/manual/eps/insertion.eps000066400000000000000000000305561255571034100232010ustar00rootroot00000000000000%!PS-Adobe-3.0 EPSF-3.0 %%BoundingBox: 60 258 582 592 %%HiResBoundingBox: 60.809375 258.244922 581.239844 591.487500 %................................... %%Creator: ESP Ghostscript 81504 (epswrite) %%CreationDate: 2007/09/08 00:52:13 %%DocumentData: Clean7Bit %%LanguageLevel: 2 %%EndComments %%BeginProlog % This copyright applies to everything between here and the %%EndProlog: % Copyright (C) 2004 artofcode LLC, Benicia, CA. All rights reserved. %%BeginResource: procset GS_epswrite_2_0_1001 /GS_epswrite_2_0_1001 80 dict dup begin /PageSize 2 array def/setpagesize{ PageSize aload pop 3 index eq exch 4 index eq and{ pop pop pop}{ PageSize dup 1 5 -1 roll put 0 4 -1 roll put dup null eq {false} {dup where} ifelse{ exch get exec} { pop/setpagedevice where { pop 1 dict dup /PageSize PageSize put setpagedevice} { /setpage where{ pop PageSize aload pop pageparams 3 {exch pop} repeat setpage}if}ifelse}ifelse}ifelse} bind def /!{bind def}bind def/#{load def}!/N/counttomark # /rG{3{3 -1 roll 255 div}repeat setrgbcolor}!/G{255 div setgray}!/K{0 G}! /r6{dup 3 -1 roll rG}!/r5{dup 3 1 roll rG}!/r3{dup rG}! /w/setlinewidth #/J/setlinecap # /j/setlinejoin #/M/setmiterlimit #/d/setdash #/i/setflat # /m/moveto #/l/lineto #/c/rcurveto # /p{N 2 idiv{N -2 roll rlineto}repeat}! /P{N 0 gt{N -2 roll moveto p}if}! /h{p closepath}!/H{P closepath}! /lx{0 rlineto}!/ly{0 exch rlineto}!/v{0 0 6 2 roll c}!/y{2 copy c}! /re{4 -2 roll m exch dup lx exch ly neg lx h}! /^{3 index neg 3 index neg}! /f{P fill}!/f*{P eofill}!/s{H stroke}!/S{P stroke}! /q/gsave #/Q/grestore #/rf{re fill}! /Y{P clip newpath}!/Y*{P eoclip newpath}!/rY{re Y}! /|={pop exch 4 1 roll 1 array astore cvx 3 array astore cvx exch 1 index def exec}! /|{exch string readstring |=}! 
/+{dup type/nametype eq{2 index 7 add -3 bitshift 2 index mul}if}! /@/currentfile #/${+ @ |}! /B{{2 copy string{readstring pop}aload pop 4 array astore cvx 3 1 roll}repeat pop pop true}! /Ix{[1 0 0 1 11 -2 roll exch neg exch neg]exch}! /,{true exch Ix imagemask}!/If{false exch Ix imagemask}!/I{exch Ix image}! /Ic{exch Ix false 3 colorimage}! /F{/Columns counttomark 3 add -2 roll/Rows exch/K -1/BlackIs1 true>> /CCITTFaxDecode filter}!/FX{<} \newcommand{\@fenicspackagett}{} \newcommand{\fenicspackage}[2]{\renewcommand{\@fenicspackage}{#1}\renewcommand{\@fenicspackagett}{#2}} \newcommand{\package}{\@fenicspackage} \newcommand{\packagett}{\@fenicspackagett} %--- Commands --- \renewcommand{\maketitle}{ \lhead{\textsf{\textbf{\@fenicstitle}}} \rhead{\textsf{\@fenicsauthor}} \pagestyle{fancy} \renewcommand{\footrulewidth}{2pt} \renewcommand{\headrulewidth}{2pt} \thispagestyle{empty} \Large\textsf{\textbf{\@fenicstitle}} \\ \vspace{-0.5cm} \hrule height 2pt \hfill\large\textsf{\today} \vspace{3cm} \@fenicsimage \vfill\large\textsf{\textbf{\@fenicsauthor}} \\ \hrule height 2pt \hfill\large\texttt{www.fenics.org} \newpage \null\vfill \normalsize Visit \texttt{http://www.fenics.org/} for the latest version of this manual. \\ Send comments and suggestions to \texttt{\@fenicspackagett{}-dev@fenics.org}. 
\pagestyle{empty} \cleardoublepage \tableofcontents \cleardoublepage \pagestyle{fancy}} \newcommand{\fenics}{\textbf{\textsf{\normalsize{FE}\Large{ni}\normalsize{CS}}}} \newcommand{\dolfin}{\textbf{\textsf{DOLFIN}}} \newcommand{\pydolfin}{\textbf{\textsf{PyDOLFIN}}} \newcommand{\ffc}{\textbf{\textsf{FFC}}} \newcommand{\fiat}{\textbf{\textsf{FIAT}}} \newcommand{\fixme}[1]{\ \\ \begin{tabular}{||p{\textwidth}||}\hline\rm\textbf{FIXME:}\rm #1 \\ \hline\end{tabular} \\} \newcommand{\devnote}[1]{$\blacktriangleright$ \emph{Developer's note:} #1} %--- Environments --- \DefineVerbatimEnvironment{code}{Verbatim}{frame=single,rulecolor=\color{blue}} \DefineVerbatimEnvironment{macrocode}{Verbatim}{commandchars=\\\{\},frame=single,rulecolor=\color{blue}} %--- Macros --- \newcommand{\dx}{\, \mathrm{d}x} \newcommand{\dX}{\, \mathrm{d}X} \newcommand{\ds}{\, \mathrm{d}s} \newcommand{\dS}{\, \mathrm{d}S} \newcommand{\R}{\mathbb{R}} ffc-1.6.0/ufc-merge-into-ffc/doc/manual/svg/000077500000000000000000000000001255571034100204555ustar00rootroot00000000000000ffc-1.6.0/ufc-merge-into-ffc/doc/manual/svg/hexahedron.svg000066400000000000000000000240111255571034100233210ustar00rootroot00000000000000 image/svg+xml v0 v1 v2 v4 v6 v7 ffc-1.6.0/ufc-merge-into-ffc/doc/manual/svg/insertion.svg000066400000000000000000000476351255571034100232270ustar00rootroot00000000000000 image/svg+xml i0 i1 i2 j0 j1 j2 1 2 3 1 2 3 A21 ffc-1.6.0/ufc-merge-into-ffc/doc/manual/svg/interval.svg000066400000000000000000000135301255571034100230240ustar00rootroot00000000000000 image/svg+xml 0 1 ffc-1.6.0/ufc-merge-into-ffc/doc/manual/svg/numbering_example_quadrilaterals.svg000066400000000000000000000403751255571034100300050ustar00rootroot00000000000000 image/svg+xml 5 v0 0 1 2 3 4 v1 v2 v3 v0 v3 v2 v1 ffc-1.6.0/ufc-merge-into-ffc/doc/manual/svg/numbering_example_triangles.svg000066400000000000000000000327171255571034100267610ustar00rootroot00000000000000 image/svg+xml 0 1 3 2 v0 v1 v2 v0 v1 v2 
ffc-1.6.0/ufc-merge-into-ffc/doc/manual/svg/ordering_example_tetrahedron.svg000066400000000000000000000201271255571034100271230ustar00rootroot00000000000000 image/svg+xml v0 v1 v2 v3 e0 ffc-1.6.0/ufc-merge-into-ffc/doc/manual/svg/ordering_example_triangle.svg000066400000000000000000000135711255571034100264160ustar00rootroot00000000000000 image/svg+xml v0 v1 v2 e0 ffc-1.6.0/ufc-merge-into-ffc/doc/manual/svg/orientation_example_triangles.svg000066400000000000000000000364551255571034100273310ustar00rootroot00000000000000 image/svg+xml v0 v1 v2 v0 v1 v2 ffc-1.6.0/ufc-merge-into-ffc/doc/manual/svg/quadrilateral.svg000066400000000000000000000153771255571034100240450ustar00rootroot00000000000000 image/svg+xml v0 v1 v2 v3 ffc-1.6.0/ufc-merge-into-ffc/doc/manual/svg/tetrahedron.svg000066400000000000000000000211671255571034100235240ustar00rootroot00000000000000 image/svg+xml v0 v1 v2 v3 ffc-1.6.0/ufc-merge-into-ffc/doc/manual/svg/triangle.svg000066400000000000000000000137461255571034100230160ustar00rootroot00000000000000 image/svg+xml v0 v1 v2 ffc-1.6.0/ufc-merge-into-ffc/doc/manual/ufc-user-manual.tex000066400000000000000000000014151255571034100234050ustar00rootroot00000000000000\documentclass{fenicsmanual} \begin{document} \fenicstitle{UFC Specification and User Manual 1.1} \fenicsauthor{Martin Sandve Aln\ae{}s, Anders Logg, Kent-Andre Mardal, Ola Skavhaug, and Hans Petter Langtangen} \fenicspackage{\textbf{\textsf{UFC}}}{ufc} \maketitle \rhead{} \newcommand{\ufc}{UFC} \input{chapters/introduction.tex} \input{chapters/assembly.tex} \input{chapters/interface.tex} \input{chapters/referencecells.tex} \input{chapters/numbering.tex} \newpage \bibliographystyle{siam} \bibliography{bibliography} \appendix \input{chapters/interface_cpp.tex} \input{chapters/assembly_cpp.tex} \input{chapters/examples.tex} \input{chapters/pythonutils.tex} \input{chapters/installation.tex} \input{chapters/versions.tex} \input{chapters/license.tex} \end{document} 
ffc-1.6.0/ufc-merge-into-ffc/doc/sphinx/000077500000000000000000000000001255571034100177125ustar00rootroot00000000000000ffc-1.6.0/ufc-merge-into-ffc/doc/sphinx/README000066400000000000000000000002631255571034100205730ustar00rootroot00000000000000This file has been moved here from the fenics-web (old fenics-doc) repository. It needs to be checked and possible expanded, then integrated with the online documentation system. ffc-1.6.0/ufc-merge-into-ffc/doc/sphinx/index.rst000066400000000000000000000106621255571034100215600ustar00rootroot00000000000000.. UFC documentation .. _ufc_introduction: ################# UFC documentation ################# ============ Introduction ============ Large parts of a finite element program are similar from problem to problem and can therefore be coded as a general, reusable library. Mesh data structures, linear algebra and finite element assembly are examples of operations that are naturally coded in a problem-independent way and made available in reusable libraries~\cite{www:fenics,www:petsc,www:sundance,www:deal.II,www:trilinos,www:diffpack}. However, some parts of a finite element program are difficult to code in a problem-independent way. In particular, this includes the evaluation of the \emph{element tensor} (the `element stiffness matrix'), that is, the evaluation of the local contribution from a finite element to a global sparse tensor (the ``stiffness matrix'') representing a discretized differential operator. These parts must thus be implemented by the application programmer for each specific combination of differential equation and discretization (finite element spaces). \index{form compilers} \index{FFC} \index{SyFi} However, domain-specific compilers such as FFC~\cite{www:ffc,logg:article:07,logg:article:09,logg:article:10,logg:article:11} and SyFi~\cite{www:syfi} make it possible to automatically generate the code for the evaluation of the element tensor. 
These \emph{form compilers} accept as input a high-level description of a finite element variational form and generate low-level code code for efficient evaluation of the element tensor and associated quantities. It thus becomes important to specify the \emph{interface} between form compilers and finite element assemblers such that the code generated by FFC, SyFi and other form compilers can be used to assemble finite element matrices and vectors (and in general tensors). Unified Form-assembly Code ========================== UFC (Unified Form-assembly Code) is a unified framework for finite element assembly. More precisely, it defines a fixed interface for communicating low level routines (functions) for evaluating and assembling finite element variational forms. The UFC interface consists of a single header file \texttt{ufc.h} that specifies a C++ interface that must be implemented by code that complies with the UFC specification. Both FFC (since version 0.4.0) and SyFi (since version 0.3.4) generate code that complies with the UFC specification. Thus, code generated by FFC and SyFi may be used interchangeably by any UFC-based finite element assembler, such as DOLFIN~\cite{www:dolfin}. Aim and scope ============= The UFC interface has been designed to make a minimal amount of assumptions on the form compilers generating the UFC code and the assemblers built on top of the UFC specification. Thus, the UFC specification provides a minimal amount of abstractions and data structures. Programmers wishing to implement the UFC specification will typically want to create system-specific (but simple) wrappers for the generated code. Few assumptions have also been made on the underlying finite element methodology. The current specification is limited to affinely mapped cells, but does not restrict the mapping of finite element function spaces. 
Thus, UFC code may be generated for elements where basis functions are transformed from the reference cell by the affine mapping, as well as for elements where the basis functions must be transformed by the Piola mapping. UFC code has been successfully generated and used in finite element codes for standard continuous Galerkin methods (Lagrange finite elements of arbitrary order), discontinuous Galerkin methods (including integrals of jumps and averages over interior facets) and mixed methods (including Brezzi--Douglas--Marini and Raviart--Thomas elements). Outline ======= In the next section, we give an overview of finite element assembly and explain how the code generated by form compilers may be used as the basic building blocks in the assembly algorithm. We then present the UFC interface in detail in Section~\ref{sec:interface}. In Section~\ref{sec:referencecells} and Section~\ref{sec:numbering}, we define the reference cells and numbering conventions that must be followed by UFC-based form compilers and assemblers. ======================= Finite element assembly ======================= In this section, we present a general algorithm for assembly of finite element variational forms and define the concepts that the UFC interface is based on. 
ffc-1.6.0/ufc/000077500000000000000000000000001255571034100130345ustar00rootroot00000000000000ffc-1.6.0/ufc/__init__.py000066400000000000000000000011741255571034100151500ustar00rootroot00000000000000__author__ = "Johan Hake (hake.dev@gmail.com)" __copyright__ = "Copyright (C) 2010-2015 Johan Hake" __date__ = "2010-08-19 -- 2015-02-26" __license__ = "Released to the public domain" # Import Python versions of the abstract classes in the UFC interface from .ufc import (cell, function, form, finite_element, dofmap, cell_integral, exterior_facet_integral, interior_facet_integral, vertex_integral, custom_integral, __version__, __swigversion__) ffc-1.6.0/ufc/ufc.h000066400000000000000000000422251255571034100137670ustar00rootroot00000000000000// This is UFC (Unified Form-assembly Code) v. 1.6.0. // This code is released into the public domain. // // The FEniCS Project (http://www.fenicsproject.org/) 2006-2015. #ifndef __UFC_H #define __UFC_H #define UFC_VERSION_MAJOR 1 #define UFC_VERSION_MINOR 6 #define UFC_VERSION_MAINTENANCE 0 #define UFC_VERSION_RELEASE 1 #include #include #include #include #define CONCAT(a,b,c) #a "." #b "." #c #define EVALUATOR(a,b,c) CONCAT(a,b,c) #if UFC_VERSION_RELEASE const char UFC_VERSION[] = EVALUATOR(UFC_VERSION_MAJOR, UFC_VERSION_MINOR, UFC_VERSION_MAINTENANCE); #else const char UFC_VERSION[] = EVALUATOR(UFC_VERSION_MAJOR, UFC_VERSION_MINOR, UFC_VERSION_MAINTENANCE) "dev"; #endif #undef CONCAT #undef EVALUATOR namespace ufc { /// Valid cell shapes enum shape {interval, triangle, quadrilateral, tetrahedron, hexahedron}; /// This class defines the data structure for a cell in a mesh. 
class cell { public: /// Constructor cell(): cell_shape(interval), topological_dimension(0), geometric_dimension(0), index(0), local_facet(-1), mesh_identifier(-1) {} /// Destructor virtual ~cell() {} /// Shape of the cell shape cell_shape; /// Topological dimension of the mesh std::size_t topological_dimension; /// Geometric dimension of the mesh std::size_t geometric_dimension; /// Array of global indices for the mesh entities of the cell std::vector > entity_indices; /// Cell index (short-cut for entity_indices[topological_dimension][0]) std::size_t index; /// Local facet index int local_facet; /// Cell orientation int orientation; /// Unique mesh identifier int mesh_identifier; }; /// This class defines the interface for a general tensor-valued function. class function { public: /// Destructor virtual ~function() {} /// Evaluate function at given point in cell virtual void evaluate(double* values, const double* coordinates, const cell& c) const = 0; }; /// This class defines the interface for a finite element. 
class finite_element { public: /// Destructor virtual ~finite_element() {} /// Return a string identifying the finite element virtual const char* signature() const = 0; /// Return the cell shape virtual shape cell_shape() const = 0; /// Return the topological dimension of the cell shape virtual std::size_t topological_dimension() const = 0; /// Return the geometric dimension of the cell shape virtual std::size_t geometric_dimension() const = 0; /// Return the dimension of the finite element function space virtual std::size_t space_dimension() const = 0; /// Return the rank of the value space virtual std::size_t value_rank() const = 0; /// Return the dimension of the value space for axis i virtual std::size_t value_dimension(std::size_t i) const = 0; /// Evaluate basis function i at given point x in cell virtual void evaluate_basis(std::size_t i, double* values, const double* x, const double* vertex_coordinates, int cell_orientation) const = 0; /// Evaluate all basis functions at given point x in cell virtual void evaluate_basis_all(double* values, const double* x, const double* vertex_coordinates, int cell_orientation) const = 0; /// Evaluate order n derivatives of basis function i at given point x in cell virtual void evaluate_basis_derivatives(std::size_t i, std::size_t n, double* values, const double* x, const double* vertex_coordinates, int cell_orientation) const = 0; /// Evaluate order n derivatives of all basis functions at given point x in cell virtual void evaluate_basis_derivatives_all(std::size_t n, double* values, const double* x, const double* vertex_coordinates, int cell_orientation) const = 0; // FIXME: cell argument only included here so we can pass it to the eval function... 
/// Evaluate linear functional for dof i on the function f virtual double evaluate_dof(std::size_t i, const function& f, const double* vertex_coordinates, int cell_orientation, const cell& c) const = 0; /// Evaluate linear functionals for all dofs on the function f virtual void evaluate_dofs(double* values, const function& f, const double* vertex_coordinates, int cell_orientation, const cell& c) const = 0; /// Interpolate vertex values from dof values virtual void interpolate_vertex_values(double* vertex_values, const double* dof_values, const double* vertex_coordinates, int cell_orientation, const cell& c) const = 0; /// Map coordinate xhat from reference cell to coordinate x in cell virtual void map_from_reference_cell(double* x, const double* xhat, const cell& c) const = 0; /// Map from coordinate x in cell to coordinate xhat in reference cell virtual void map_to_reference_cell(double* xhat, const double* x, const cell& c) const = 0; /// Return the number of sub elements (for a mixed element) virtual std::size_t num_sub_elements() const = 0; /// Create a new finite element for sub element i (for a mixed element) virtual finite_element* create_sub_element(std::size_t i) const = 0; /// Create a new class instance virtual finite_element* create() const = 0; }; /// This class defines the interface for a local-to-global mapping of /// degrees of freedom (dofs). 
class dofmap { public: /// Destructor virtual ~dofmap() {} /// Return a string identifying the dofmap virtual const char* signature() const = 0; /// Return true iff mesh entities of topological dimension d are /// needed virtual bool needs_mesh_entities(std::size_t d) const = 0; /// Return the topological dimension of the associated cell shape virtual std::size_t topological_dimension() const = 0; /// Return the geometric dimension of the associated cell shape virtual std::size_t geometric_dimension() const = 0; /// Return the dimension of the global finite element function space virtual std::size_t global_dimension(const std::vector& num_global_mesh_entities) const = 0; /// Return the dimension of the local finite element function space /// for a cell virtual std::size_t num_element_dofs() const = 0; /// Return the number of dofs on each cell facet virtual std::size_t num_facet_dofs() const = 0; /// Return the number of dofs associated with each cell entity of /// dimension d virtual std::size_t num_entity_dofs(std::size_t d) const = 0; /// Tabulate the local-to-global mapping of dofs on a cell virtual void tabulate_dofs(std::size_t* dofs, const std::vector& num_global_entities, const cell& c) const = 0; /// Tabulate the local-to-local mapping from facet dofs to cell dofs virtual void tabulate_facet_dofs(std::size_t* dofs, std::size_t facet) const = 0; /// Tabulate the local-to-local mapping of dofs on entity (d, i) virtual void tabulate_entity_dofs(std::size_t* dofs, std::size_t d, std::size_t i) const = 0; /// Tabulate the coordinates of all dofs on a cell virtual void tabulate_coordinates(double* dof_coordinates, const double* vertex_coordinates) const = 0; /// Return the number of sub dofmaps (for a mixed element) virtual std::size_t num_sub_dofmaps() const = 0; /// Create a new dofmap for sub dofmap i (for a mixed element) virtual dofmap* create_sub_dofmap(std::size_t i) const = 0; /// Create a new class instance virtual dofmap* create() const = 0; }; /// 
This class defines the shared interface for classes implementing /// the tabulation of a tensor corresponding to the local contribution /// to a form from an integral. class integral { public: /// Destructor virtual ~integral() {} /// Tabulate which form coefficients are used by this integral virtual const std::vector & enabled_coefficients() const = 0; }; /// This class defines the interface for the tabulation of the cell /// tensor corresponding to the local contribution to a form from /// the integral over a cell. class cell_integral: public integral { public: /// Destructor virtual ~cell_integral() {} /// Tabulate the tensor for the contribution from a local cell virtual void tabulate_tensor(double* A, const double * const * w, const double* vertex_coordinates, int cell_orientation) const = 0; }; /// This class defines the interface for the tabulation of the /// exterior facet tensor corresponding to the local contribution to /// a form from the integral over an exterior facet. class exterior_facet_integral: public integral { public: /// Destructor virtual ~exterior_facet_integral() {} /// Tabulate the tensor for the contribution from a local exterior facet virtual void tabulate_tensor(double* A, const double * const * w, const double* vertex_coordinates, std::size_t facet, int cell_orientation) const = 0; }; /// This class defines the interface for the tabulation of the /// interior facet tensor corresponding to the local contribution to /// a form from the integral over an interior facet. 
class interior_facet_integral: public integral { public: /// Destructor virtual ~interior_facet_integral() {} /// Tabulate the tensor for the contribution from a local interior facet virtual void tabulate_tensor(double* A, const double * const * w, const double* vertex_coordinates_0, const double* vertex_coordinates_1, std::size_t facet_0, std::size_t facet_1, int cell_orientation_0, int cell_orientation_1) const = 0; }; /// This class defines the interface for the tabulation of /// an expression evaluated at exactly one point. class vertex_integral: public integral { public: /// Constructor vertex_integral() {} /// Destructor virtual ~vertex_integral() {} /// Tabulate the tensor for the contribution from the local vertex virtual void tabulate_tensor(double* A, const double * const * w, const double* vertex_coordinates, std::size_t vertex, int cell_orientation) const = 0; }; /// This class defines the interface for the tabulation of the /// tensor corresponding to the local contribution to a form from /// the integral over a custom domain defined in terms of a set of /// quadrature points and weights. class custom_integral: public integral { public: /// Constructor custom_integral() {} /// Destructor virtual ~custom_integral() {}; /// Return the number of cells involved in evaluation of the integral virtual std::size_t num_cells() const = 0; /// Tabulate the tensor for the contribution from a custom domain virtual void tabulate_tensor(double* A, const double * const * w, const double* vertex_coordinates, std::size_t num_quadrature_points, const double* quadrature_points, const double* quadrature_weights, const double* facet_normals, int cell_orientation) const = 0; }; /// This class defines the interface for the assembly of the global /// tensor corresponding to a form with r + n arguments, that is, a /// mapping /// /// a : V1 x V2 x ... Vr x W1 x W2 x ... x Wn -> R /// /// with arguments v1, v2, ..., vr, w1, w2, ..., wn. 
The rank r /// global tensor A is defined by /// /// A = a(V1, V2, ..., Vr, w1, w2, ..., wn), /// /// where each argument Vj represents the application to the /// sequence of basis functions of Vj and w1, w2, ..., wn are given /// fixed functions (coefficients). class form { public: /// Destructor virtual ~form() {} /// Return a string identifying the form virtual const char* signature() const = 0; /// Return the rank of the global tensor (r) virtual std::size_t rank() const = 0; /// Return the number of coefficients (n) virtual std::size_t num_coefficients() const = 0; /// Return original coefficient position for each coefficient (0 <= i < n) virtual std::size_t original_coefficient_position(std::size_t i) const = 0; /// Create a new finite element for argument function 0 <= i < r+n virtual finite_element* create_finite_element(std::size_t i) const = 0; /// Create a new dofmap for argument function 0 <= i < r+n virtual dofmap* create_dofmap(std::size_t i) const = 0; /// Return the upper bound on subdomain ids for cell integrals virtual std::size_t max_cell_subdomain_id() const = 0; /// Return the upper bound on subdomain ids for exterior facet integrals virtual std::size_t max_exterior_facet_subdomain_id() const = 0; /// Return the upper bound on subdomain ids for interior facet integrals virtual std::size_t max_interior_facet_subdomain_id() const = 0; /// Return the upper bound on subdomain ids for vertex integrals virtual std::size_t max_vertex_subdomain_id() const = 0; /// Return the upper bound on subdomain ids for custom integrals virtual std::size_t max_custom_subdomain_id() const = 0; /// Return whether form has any cell integrals virtual bool has_cell_integrals() const = 0; /// Return whether form has any exterior facet integrals virtual bool has_exterior_facet_integrals() const = 0; /// Return whether form has any interior facet integrals virtual bool has_interior_facet_integrals() const = 0; /// Return whether form has any vertex integrals virtual bool 
has_vertex_integrals() const = 0; /// Return whether form has any custom integrals virtual bool has_custom_integrals() const = 0; /// Create a new cell integral on sub domain subdomain_id virtual cell_integral* create_cell_integral(std::size_t subdomain_id) const = 0; /// Create a new exterior facet integral on sub domain subdomain_id virtual exterior_facet_integral* create_exterior_facet_integral(std::size_t subdomain_id) const = 0; /// Create a new interior facet integral on sub domain subdomain_id virtual interior_facet_integral* create_interior_facet_integral(std::size_t subdomain_id) const = 0; /// Create a new vertex integral on sub domain subdomain_id virtual vertex_integral* create_vertex_integral(std::size_t subdomain_id) const = 0; /// Create a new custom integral on sub domain subdomain_id virtual custom_integral* create_custom_integral(std::size_t subdomain_id) const = 0; /// Create a new cell integral on everywhere else virtual cell_integral* create_default_cell_integral() const = 0; /// Create a new exterior facet integral on everywhere else virtual exterior_facet_integral* create_default_exterior_facet_integral() const = 0; /// Create a new interior facet integral on everywhere else virtual interior_facet_integral* create_default_interior_facet_integral() const = 0; /// Create a new vertex integral on everywhere else virtual vertex_integral* create_default_vertex_integral() const = 0; /// Create a new custom integral on everywhere else virtual custom_integral* create_default_custom_integral() const = 0; }; } #endif ffc-1.6.0/ufc/ufc.i000066400000000000000000000051571255571034100137730ustar00rootroot00000000000000%module ufc %{ #include "ufc.h" #include %} // Use std::shared_ptr %include // Ignore interface to ufc::cell that will not be available for the user %ignore ufc::cell::entity_indices; // Declare which classes should be stored using shared_ptr %include "ufc_shared_ptr_classes.i" %include 
//----------------------------------------------------------------------------- // Home brewed versions of the SWIG provided SWIG_AsVal(Type). //----------------------------------------------------------------------------- %fragment("Py_convert_uint", "header") { // A check for int and converter to uint SWIGINTERNINLINE bool Py_convert_uint(PyObject* in, std::size_t& value) { %#if PY_MAJOR_VERSION >= 3 if (!(PyLong_Check(in) && PyLong_AS_LONG(in)>=0)) return false; value = static_cast(PyLong_AS_LONG(in)); return true; %#else if (!(PyInt_Check(in) && PyInt_AS_LONG(in)>=0)) return false; value = static_cast(PyInt_AS_LONG(in)); return true; %#endif } } //----------------------------------------------------------------------------- // Out typemap (std::size_t) //----------------------------------------------------------------------------- %typemap(out) std::size_t { // Typemap std::size_t %#if PY_MAJOR_VERSION >= 3 $result = PyLong_FromLong(static_cast< long >($1)); %#else $result = PyInt_FromLong(static_cast< long >($1)); %#endif } //----------------------------------------------------------------------------- // Typecheck and in typemap (std::size_t) //----------------------------------------------------------------------------- %typecheck(SWIG_TYPECHECK_INTEGER) std::size_t { $1 = PyInt_Check($input) ? 
1 : 0; } %typemap(in, fragment="Py_convert_uint") std::size_t { if (!Py_convert_uint($input, $1)) SWIG_exception(SWIG_TypeError, "expected positive 'int' for argument $argnum"); } //----------------------------------------------------------------------------- // Include the main header file //----------------------------------------------------------------------------- %include "ufc.h" // Include code to generate a __swigversion__ attribute to the cpp module // Add prefix to avoid naming problems with other modules %inline %{ int ufc_swigversion() { return SWIGVERSION; } %} %pythoncode %{ __version__ = UFC_VERSION del UFC_VERSION, UFC_VERSION_MAJOR, UFC_VERSION_MINOR, UFC_VERSION_MAINTENANCE, UFC_VERSION_RELEASE """Code for adding swig version to ufc extension module.""" tmp = hex(ufc_swigversion()) __swigversion__ = "%d.%d.%d"%(tuple(map(int, [tmp[-5], tmp[-3], tmp[-2:]]))) del tmp, ufc_swigversion %} ffc-1.6.0/ufc/ufc_geometry.h000066400000000000000000001262271255571034100157070ustar00rootroot00000000000000// This file provides utility functions for computing geometric quantities. // This code is released into the public domain. // // The FEniCS Project (http://www.fenicsproject.org/) 2013-2015. #ifndef __UFC_GEOMETRY_H #define __UFC_GEOMETRY_H #include // TODO: Wrap all in namespace ufc //namespace ufc //{ /// A note regarding data structures. All matrices are represented as /// row-major flattened raw C++ arrays. Benchmarks indicate that when /// optimization (-O1 and up) is used, the following conditions hold: /// /// 1. std::vector is just as fast as raw C++ arrays for indexing. /// /// 2. Flattened arrays are twice as fast as nested arrays, both for /// std:vector and raw C++ arrays. /// /// 3. Defining an array by 'std::vector x(n)', where n is a /// literal, leads to dynamic allocation and results in significant /// slowdowns compared to the definition 'double x[n]'. 
/// /// The conclusion is that we should use flattened raw C++ arrays in /// the interfaces for these utility functions, since some of the /// arrays passed to these functions (in particular Jacobians) are /// created inside the generated functions (tabulate_tensor). Note /// that an std::vector x may also be passed as raw pointer by &x[0]. // TODO: Should signatures of compute___d match for each foo? // On one hand the snippets use different quantities, on the other // some consistency is nice to simplify the code generation. // Currently only the arguments that are actually used are included. /// --- Some fixed numbers by name for readability in this file --- // TODO: Use these numbers where relevant below to make this file more self documenting // (namespaced using UFC_ in the names because they collide with variables in other libraries) // Use this for array dimensions indexed by local vertex number #define UFC_NUM_VERTICES_IN_INTERVAL 2 #define UFC_NUM_VERTICES_IN_TRIANGLE 3 #define UFC_NUM_VERTICES_IN_TETRAHEDRON 4 #define UFC_NUM_VERTICES_IN_QUADRILATERAL 4 #define UFC_NUM_VERTICES_IN_HEXAHEDRON 8 // Use this for array dimensions indexed by local edge number #define UFC_NUM_EDGES_IN_INTERVAL 1 #define UFC_NUM_EDGES_IN_TRIANGLE 3 #define UFC_NUM_EDGES_IN_TETRAHEDRON 6 #define UFC_NUM_EDGES_IN_QUADRILATERAL 4 #define UFC_NUM_EDGES_IN_HEXAHEDRON 12 // Use this for array dimensions indexed by local facet number #define UFC_NUM_FACETS_IN_INTERVAL 2 #define UFC_NUM_FACETS_IN_TRIANGLE 3 #define UFC_NUM_FACETS_IN_TETRAHEDRON 4 #define UFC_NUM_FACETS_IN_QUADRILATERAL 4 #define UFC_NUM_FACETS_IN_HEXAHEDRON 6 // Use UFC_GDIM_N to show the intention that the geometric dimension is N #define UFC_GDIM_1 1 #define UFC_GDIM_2 2 #define UFC_GDIM_3 3 // Use UFC_TDIM_N to show the intention that the topological dimension is N #define UFC_TDIM_1 1 #define UFC_TDIM_2 2 #define UFC_TDIM_3 3 /// --- Local reference cell coordinates by UFC conventions --- static const double 
interval_vertices[UFC_NUM_VERTICES_IN_INTERVAL][UFC_TDIM_1] = { {0.0}, {1.0} }; static const double triangle_vertices[UFC_NUM_VERTICES_IN_TRIANGLE][UFC_TDIM_2] = { {0.0, 0.0}, {1.0, 0.0}, {0.0, 1.0} }; static const double tetrahedron_vertices[UFC_NUM_VERTICES_IN_TETRAHEDRON][UFC_TDIM_3] = { {0.0, 0.0, 0.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0}, {0.0, 0.0, 1.0} }; // FIXME: Insert quad conventions here /* static const double quadrilateral_vertices[UFC_NUM_VERTICES_IN_QUADRILATERAL][UFC_TDIM_2] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, }; */ // FIXME: Insert quad conventions here /* static const double hexahedron_vertices[UFC_NUM_VERTICES_IN_HEXAHEDRON][UFC_TDIM_3] = { {0.0, 0.0, 0.0}, {0.0, 0.0, 0.0}, {0.0, 0.0, 0.0}, {0.0, 0.0, 0.0}, {0.0, 0.0, 0.0}, {0.0, 0.0, 0.0}, {0.0, 0.0, 0.0}, {0.0, 0.0, 0.0}, }; */ /// --- Local reference cell barycenter by UFC conventions --- static const double interval_barycenter[UFC_TDIM_1] = { 0.5 }; static const double triangle_barycenter[UFC_TDIM_2] = { 1.0/3.0, 1.0/3.0 }; static const double tetrahedron_barycenter[UFC_TDIM_3] = { 0.25, 0.25, 0.25 }; // FIXME: Insert quad conventions here /* static const double quadrilateral_barycenter[UFC_TDIM_2] = { 0.5, 0.5 }; */ // FIXME: Insert quad conventions here /* static const double hexahedron_barycenter[UFC_TDIM_3] = { 0.5, 0.5, 0.5 }; */ /// --- Local reference cell facet barycenters by UFC conventions --- static const double interval_facet_barycenter[UFC_NUM_FACETS_IN_INTERVAL][UFC_TDIM_1] = { {0.0}, {1.0} }; static const double triangle_facet_barycenter[UFC_NUM_FACETS_IN_TRIANGLE][UFC_TDIM_2] = { {0.5, 0.5}, {0.0, 0.5}, {0.5, 0.0} }; static const double tetrahedron_facet_barycenter[UFC_NUM_FACETS_IN_TETRAHEDRON][UFC_TDIM_3] = { {0.5, 0.5, 0.5}, {0.0, 1.0/3.0, 1.0/3.0}, {1.0/3.0, 0.0, 1.0/3.0}, {1.0/3.0, 1.0/3.0, 0.0}, }; // FIXME: Insert quad conventions here /* static const double quadrilateral_facet_barycenter[UFC_NUM_FACETS_IN_QUADRILATERAL][UFC_TDIM_2] = { {0.0, 0.0}, {0.0, 
0.0}, {0.0, 0.0}, {0.0, 0.0}, }; */ // FIXME: Insert quad conventions here /* static const double hexahedron_facet_barycenter[UFC_NUM_FACETS_IN_HEXAHEDRON][UFC_TDIM_3] = { {0.0, 0.5, 0.5}, {0.0, 0.5, 0.5}, {0.0, 0.5, 0.5}, {0.0, 0.5, 0.5}, {0.0, 0.5, 0.5}, {0.0, 0.5, 0.5}, }; */ /// --- Local reference cell facet orientations by UFC conventions --- static const double interval_facet_orientations[UFC_NUM_FACETS_IN_INTERVAL] = { -1.0, +1.0, }; static const double triangle_facet_orientations[UFC_NUM_FACETS_IN_TRIANGLE] = { +1.0, -1.0, +1.0 }; static const double tetrahedron_facet_orientations[UFC_NUM_FACETS_IN_TETRAHEDRON] = { +1.0, -1.0, +1.0, -1.0 }; // FIXME: Insert quad conventions here /* static const double quadrilateral_facet_orientations[UFC_NUM_FACETS_IN_QUADRILATERAL] = { +1.0, +1.0, +1.0, +1.0, }; */ // FIXME: Insert quad conventions here /* static const double hexahedron_facet_orientations[UFC_NUM_FACETS_IN_HEXAHEDRON] = { +1.0, +1.0, +1.0, +1.0, +1.0, +1.0, }; */ /// --- Local reference cell entity relations by UFC conventions --- static const unsigned int triangle_edge_vertices[UFC_NUM_EDGES_IN_TRIANGLE][2] = { {1, 2}, {0, 2}, {0, 1} }; static const unsigned int tetrahedron_edge_vertices[UFC_NUM_EDGES_IN_TETRAHEDRON][2] = { {2, 3}, {1, 3}, {1, 2}, {0, 3}, {0, 2}, {0, 1} }; // FIXME: Insert quad conventions here /* static const unsigned int quadrilateral_edge_vertices[UFC_NUM_EDGES_IN_QUADRILATERAL][2] = { {0, 0}, {0, 0}, {0, 0}, {0, 0}, }; */ // FIXME: Insert quad conventions here /* static const unsigned int hexahedron_edge_vertices[UFC_NUM_EDGES_IN_HEXAHEDRON][2] = { {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, }; */ /// --- Local reference cell entity relations by UFC conventions --- static const unsigned int interval_facet_vertices[UFC_NUM_FACETS_IN_INTERVAL][1] = { {0}, {1} }; static const unsigned int triangle_facet_vertices[UFC_NUM_FACETS_IN_TRIANGLE][UFC_NUM_VERTICES_IN_INTERVAL] = { {1, 2}, {0, 
2}, {0, 1} }; static const unsigned int tetrahedron_facet_vertices[UFC_NUM_FACETS_IN_TETRAHEDRON][UFC_NUM_VERTICES_IN_TRIANGLE] = { {1, 2, 3}, {0, 2, 3}, {0, 1, 3}, {0, 1, 2} }; static const unsigned int tetrahedron_facet_edge_vertices[UFC_NUM_FACETS_IN_TETRAHEDRON][UFC_NUM_FACETS_IN_TRIANGLE][UFC_NUM_VERTICES_IN_INTERVAL] = { {{2, 3}, {1, 3}, {1, 2}}, {{2, 3}, {0, 3}, {0, 2}}, {{1, 3}, {0, 3}, {0, 1}}, {{1, 2}, {0, 2}, {0, 1}} }; // FIXME: Insert quad conventions here /* static const unsigned int quadrilateral_facet_vertices[UFC_NUM_FACETS_IN_QUADRILATERAL][UFC_NUM_VERTICES_IN_INTERVAL] = { {0, 0}, {0, 0}, {0, 0}, {0, 0}, }; */ // FIXME: Insert quad conventions here /* static const unsigned int hexahedron_facet_vertices[UFC_NUM_FACETS_IN_HEXAHEDRON][UFC_NUM_VERTICES_IN_QUADRILATERAL] = { {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, }; */ // FIXME: Insert quad conventions here /* static const unsigned int hexahedron_facet_edge_vertices[UFC_NUM_FACETS_IN_HEXAHEDRON][UFC_NUM_FACETS_IN_QUADRILATERAL][UFC_NUM_VERTICES_IN_INTERVAL] = { {{0, 0}, {0, 0}, {0, 0}, {0, 0}}, {{0, 0}, {0, 0}, {0, 0}, {0, 0}}, {{0, 0}, {0, 0}, {0, 0}, {0, 0}}, {{0, 0}, {0, 0}, {0, 0}, {0, 0}}, {{0, 0}, {0, 0}, {0, 0}, {0, 0}}, {{0, 0}, {0, 0}, {0, 0}, {0, 0}}, }; */ /// --- Reference cell edge vectors by UFC conventions (edge vertex 1 - edge vertex 0 for each edge in cell) --- static const double triangle_reference_edge_vectors[UFC_NUM_EDGES_IN_TRIANGLE][UFC_TDIM_2] = { {-1.0, 1.0}, { 0.0, 1.0}, { 1.0, 0.0}, }; static const double tetrahedron_reference_edge_vectors[UFC_NUM_EDGES_IN_TETRAHEDRON][UFC_TDIM_3] = { { 0.0, -1.0, 1.0}, {-1.0, 0.0, 1.0}, {-1.0, 1.0, 0.0}, { 0.0, 0.0, 1.0}, { 0.0, 1.0, 0.0}, { 1.0, 0.0, 0.0}, }; // Edge vectors for each triangle facet of a tetrahedron static const double tetrahedron_facet_reference_edge_vectors[UFC_NUM_FACETS_IN_TETRAHEDRON][UFC_NUM_EDGES_IN_TRIANGLE][UFC_TDIM_3] = { { // facet 0 { 0.0, -1.0, 1.0}, {-1.0, 0.0, 
1.0}, {-1.0, 1.0, 0.0}, }, { // facet 1 { 0.0, -1.0, 1.0}, { 0.0, 0.0, 1.0}, { 0.0, 1.0, 0.0}, }, { // facet 2 {-1.0, 0.0, 1.0}, { 0.0, 0.0, 1.0}, { 1.0, 0.0, 0.0}, }, { // facet 3 {-1.0, 1.0, 0.0}, { 0.0, 1.0, 0.0}, { 1.0, 0.0, 0.0}, }, }; // FIXME: Insert quad conventions here /* static const double quadrilateral_reference_edge_vectors[UFC_NUM_EDGES_IN_QUADRILATERAL][UFC_TDIM_2] = { { 0.0, 0.0}, { 0.0, 0.0}, { 0.0, 0.0}, { 0.0, 0.0}, }; */ // FIXME: Insert quad conventions here /* static const double hexahedron_reference_edge_vectors[UFC_NUM_EDGES_IN_HEXAHEDRON][UFC_TDIM_3] = { { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, }; */ // FIXME: Insert quad conventions here /* // Edge vectors for each quadrilateral facet of a hexahedron static const double hexahedron_facet_reference_edge_vectors[UFC_NUM_FACETS_IN_HEXAHEDRON][UFC_NUM_EDGES_IN_QUADRILATERAL][UFC_TDIM_3] = { { // facet 0 { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, }, { // facet 1 { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, }, { // facet 2 { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, }, { // facet 3 { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, }, { // facet 4 { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, }, { // facet 5 { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, }, }; */ /// --- Reference cell facet normals by UFC conventions (outwards pointing on reference cell) --- static const double interval_reference_facet_normals[UFC_NUM_FACETS_IN_INTERVAL][UFC_TDIM_1] = { {-1.0}, {+1.0}, }; static const double triangle_reference_facet_normals[UFC_NUM_FACETS_IN_TRIANGLE][UFC_TDIM_2] = { { 0.7071067811865476, 0.7071067811865476 }, {-1.0, 0.0}, { 0.0, -1.0}, }; static const double 
tetrahedron_reference_facet_normals[UFC_NUM_FACETS_IN_TETRAHEDRON][UFC_TDIM_3] = { {0.5773502691896258, 0.5773502691896258, 0.5773502691896258}, {-1.0, 0.0, 0.0}, { 0.0, -1.0, 0.0}, { 0.0, 0.0, -1.0}, }; // FIXME: Insert quad conventions here /* static const double quadrilateral_reference_facet_normals[UFC_NUM_FACETS_IN_QUADRILATERAL][UFC_TDIM_2] = { { 0.0, 0.0 }, { 0.0, 0.0 }, { 0.0, 0.0 }, { 0.0, 0.0 }, }; */ // FIXME: Insert quad conventions here /* static const double hexahedron_reference_facet_normals[UFC_NUM_FACETS_IN_HEXAHEDRON][UFC_TDIM_3] = { { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, { 0.0, 0.0, 0.0}, }; */ /// --- Jacobians of reference facet cell to reference cell coordinate mappings by UFC conventions --- static const double triangle_reference_facet_jacobian[UFC_NUM_FACETS_IN_TRIANGLE][UFC_TDIM_2][UFC_TDIM_2-1] = { { {-1.0}, { 1.0} }, { { 0.0}, { 1.0} }, { { 1.0}, { 0.0} }, }; static const double tetrahedron_reference_facet_jacobian[UFC_NUM_FACETS_IN_TETRAHEDRON][UFC_TDIM_3][UFC_TDIM_3-1] = { { {-1.0, -1.0}, {1.0, 0.0}, {0.0, 1.0} }, { { 0.0, 0.0}, {1.0, 0.0}, {0.0, 1.0} }, { { 1.0, 0.0}, {0.0, 0.0}, {0.0, 1.0} }, { { 1.0, 0.0}, {0.0, 1.0}, {0.0, 0.0} }, }; // FIXME: Insert quad conventions here /* static const double quadrilateral_reference_facet_jacobian[UFC_NUM_FACETS_IN_QUADRILATERAL][UFC_TDIM_2][UFC_TDIM_2-1] = { { { 0.0}, { 0.0} }, { { 0.0}, { 0.0} }, { { 0.0}, { 0.0} }, { { 0.0}, { 0.0} }, }; */ // FIXME: Insert quad conventions here /* static const double hexahedron_reference_facet_jacobian[UFC_NUM_FACETS_IN_HEXAHEDRON][UFC_TDIM_3][UFC_TDIM_3-1] = { { { 0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} }, { { 0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} }, { { 0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} }, { { 0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} }, { { 0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} }, { { 0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} }, }; */ /// --- Coordinate mappings from reference facet cell to reference cell by UFC conventions --- inline 
void compute_reference_facet_to_reference_cell_coordinates_interval(double Xc[UFC_TDIM_1], unsigned int facet) { switch (facet) { case 0: Xc[0] = 0.0; break; case 1: Xc[0] = 1.0; break; }; } inline void compute_reference_facet_to_reference_cell_coordinates_triangle(double Xc[UFC_TDIM_2], const double Xf[UFC_TDIM_2-1], unsigned int facet) { switch (facet) { case 0: Xc[0] = 1.0 - Xf[0]; Xc[1] = Xf[0]; break; case 1: Xc[0] = 0.0; Xc[1] = Xf[0]; break; case 2: Xc[0] = Xf[0]; Xc[1] = 0.0; break; }; } inline void compute_reference_facet_to_reference_cell_coordinates_tetrahedron(double Xc[UFC_TDIM_3], const double Xf[UFC_TDIM_3-1], unsigned int facet) { switch (facet) { case 0: Xc[0] = 1.0 - Xf[0] - Xf[1]; Xc[1] = Xf[0]; Xc[2] = Xf[1]; break; case 1: Xc[0] = 0.0; Xc[1] = Xf[0]; Xc[2] = Xf[1]; break; case 2: Xc[0] = Xf[0]; Xc[1] = 0.0; Xc[2] = Xf[1]; break; case 3: Xc[0] = Xf[0]; Xc[1] = Xf[1]; Xc[2] = 0.0; break; }; } ///--- Computation of Jacobian matrices --- /// Compute Jacobian J for interval embedded in R^1 inline void compute_jacobian_interval_1d(double J[UFC_GDIM_1*UFC_TDIM_1], const double vertex_coordinates[2]) { J[0] = vertex_coordinates[1] - vertex_coordinates[0]; } /// Compute Jacobian J for interval embedded in R^2 inline void compute_jacobian_interval_2d(double J[UFC_GDIM_2*UFC_TDIM_1], const double vertex_coordinates[4]) { J[0] = vertex_coordinates[2] - vertex_coordinates[0]; J[1] = vertex_coordinates[3] - vertex_coordinates[1]; } /// Compute Jacobian J for interval embedded in R^3 inline void compute_jacobian_interval_3d(double J[UFC_GDIM_3*UFC_TDIM_1], const double vertex_coordinates[6]) { J[0] = vertex_coordinates[3] - vertex_coordinates[0]; J[1] = vertex_coordinates[4] - vertex_coordinates[1]; J[2] = vertex_coordinates[5] - vertex_coordinates[2]; } /// Compute Jacobian J for triangle embedded in R^2 inline void compute_jacobian_triangle_2d(double J[UFC_GDIM_2*UFC_TDIM_2], const double vertex_coordinates[6]) { J[0] = vertex_coordinates[2] - 
vertex_coordinates[0]; J[1] = vertex_coordinates[4] - vertex_coordinates[0]; J[2] = vertex_coordinates[3] - vertex_coordinates[1]; J[3] = vertex_coordinates[5] - vertex_coordinates[1]; } /// Compute Jacobian J for triangle embedded in R^3 inline void compute_jacobian_triangle_3d(double J[UFC_GDIM_3*UFC_TDIM_2], const double vertex_coordinates[9]) { J[0] = vertex_coordinates[3] - vertex_coordinates[0]; J[1] = vertex_coordinates[6] - vertex_coordinates[0]; J[2] = vertex_coordinates[4] - vertex_coordinates[1]; J[3] = vertex_coordinates[7] - vertex_coordinates[1]; J[4] = vertex_coordinates[5] - vertex_coordinates[2]; J[5] = vertex_coordinates[8] - vertex_coordinates[2]; } /// Compute Jacobian J for tetrahedron embedded in R^3 inline void compute_jacobian_tetrahedron_3d(double J[UFC_GDIM_3*UFC_TDIM_3], const double vertex_coordinates[12]) { J[0] = vertex_coordinates[3] - vertex_coordinates[0]; J[1] = vertex_coordinates[6] - vertex_coordinates[0]; J[2] = vertex_coordinates[9] - vertex_coordinates[0]; J[3] = vertex_coordinates[4] - vertex_coordinates[1]; J[4] = vertex_coordinates[7] - vertex_coordinates[1]; J[5] = vertex_coordinates[10] - vertex_coordinates[1]; J[6] = vertex_coordinates[5] - vertex_coordinates[2]; J[7] = vertex_coordinates[8] - vertex_coordinates[2]; J[8] = vertex_coordinates[11] - vertex_coordinates[2]; } //--- Computation of Jacobian inverses --- // TODO: Remove this when ffc is updated to use the NEW ones below /// Compute Jacobian inverse K for interval embedded in R^1 inline void compute_jacobian_inverse_interval_1d(double* K, double& det, const double* J) { det = J[0]; K[0] = 1.0 / det; } /// Compute Jacobian (pseudo)inverse K for interval embedded in R^2 inline void compute_jacobian_inverse_interval_2d(double* K, double& det, const double* J) { const double det2 = J[0]*J[0] + J[1]*J[1]; det = std::sqrt(det2); K[0] = J[0] / det2; K[1] = J[1] / det2; } /// Compute Jacobian (pseudo)inverse K for interval embedded in R^3 inline void 
compute_jacobian_inverse_interval_3d(double* K, double& det, const double* J) { // TODO: Move computation of det to a separate function, det is often needed when K is not const double det2 = J[0]*J[0] + J[1]*J[1] + J[2]*J[2]; det = std::sqrt(det2); K[0] = J[0] / det2; K[1] = J[1] / det2; K[2] = J[2] / det2; } /// Compute Jacobian inverse K for triangle embedded in R^2 inline void compute_jacobian_inverse_triangle_2d(double* K, double& det, const double* J) { det = J[0]*J[3] - J[1]*J[2]; K[0] = J[3] / det; K[1] = -J[1] / det; K[2] = -J[2] / det; K[3] = J[0] / det; } /// Compute Jacobian (pseudo)inverse K for triangle embedded in R^3 inline void compute_jacobian_inverse_triangle_3d(double* K, double& det, const double* J) { const double d_0 = J[2]*J[5] - J[4]*J[3]; const double d_1 = J[4]*J[1] - J[0]*J[5]; const double d_2 = J[0]*J[3] - J[2]*J[1]; const double c_0 = J[0]*J[0] + J[2]*J[2] + J[4]*J[4]; const double c_1 = J[1]*J[1] + J[3]*J[3] + J[5]*J[5]; const double c_2 = J[0]*J[1] + J[2]*J[3] + J[4]*J[5]; const double den = c_0*c_1 - c_2*c_2; const double det2 = d_0*d_0 + d_1*d_1 + d_2*d_2; det = std::sqrt(det2); K[0] = (J[0]*c_1 - J[1]*c_2) / den; K[1] = (J[2]*c_1 - J[3]*c_2) / den; K[2] = (J[4]*c_1 - J[5]*c_2) / den; K[3] = (J[1]*c_0 - J[0]*c_2) / den; K[4] = (J[3]*c_0 - J[2]*c_2) / den; K[5] = (J[5]*c_0 - J[4]*c_2) / den; } /// Compute Jacobian inverse K for tetrahedron embedded in R^3 inline void compute_jacobian_inverse_tetrahedron_3d(double* K, double& det, const double* J) { const double d_00 = J[4]*J[8] - J[5]*J[7]; const double d_01 = J[5]*J[6] - J[3]*J[8]; const double d_02 = J[3]*J[7] - J[4]*J[6]; const double d_10 = J[2]*J[7] - J[1]*J[8]; const double d_11 = J[0]*J[8] - J[2]*J[6]; const double d_12 = J[1]*J[6] - J[0]*J[7]; const double d_20 = J[1]*J[5] - J[2]*J[4]; const double d_21 = J[2]*J[3] - J[0]*J[5]; const double d_22 = J[0]*J[4] - J[1]*J[3]; det = J[0]*d_00 + J[3]*d_10 + J[6]*d_20; K[0] = d_00 / det; K[1] = d_10 / det; K[2] = d_20 / det; K[3] = 
d_01 / det; K[4] = d_11 / det; K[5] = d_21 / det; K[6] = d_02 / det; K[7] = d_12 / det; K[8] = d_22 / det; } //--- NEW Computation of Jacobian (sub)determinants --- /// Compute Jacobian determinant for interval embedded in R^1 inline void compute_jacobian_determinants_interval_1d(double & det, const double J[UFC_GDIM_1*UFC_TDIM_1]) { det = J[0]; } /// Compute Jacobian (pseudo)determinants for interval embedded in R^2 inline void compute_jacobian_determinants_interval_2d(double & det2, double & det, const double J[UFC_GDIM_2*UFC_TDIM_1]) { det2 = J[0]*J[0] + J[1]*J[1]; det = std::sqrt(det2); } /// Compute Jacobian (pseudo)determinants for interval embedded in R^3 inline void compute_jacobian_determinants_interval_3d(double & det2, double & det, const double J[UFC_GDIM_3*UFC_TDIM_1]) { det2 = J[0]*J[0] + J[1]*J[1] + J[2]*J[2]; det = std::sqrt(det2); } /// Compute Jacobian determinant for triangle embedded in R^2 inline void compute_jacobian_determinants_triangle_2d(double & det, const double J[UFC_GDIM_2*UFC_TDIM_2]) { det = J[0]*J[3] - J[1]*J[2]; } /// Compute Jacobian (pseudo)determinants for triangle embedded in R^3 inline void compute_jacobian_determinants_triangle_3d(double & den, double & det2, double & det, double c[3], const double J[UFC_GDIM_3*UFC_TDIM_2]) { const double d_0 = J[2]*J[5] - J[4]*J[3]; const double d_1 = J[4]*J[1] - J[0]*J[5]; const double d_2 = J[0]*J[3] - J[2]*J[1]; c[0] = J[0]*J[0] + J[2]*J[2] + J[4]*J[4]; c[1] = J[1]*J[1] + J[3]*J[3] + J[5]*J[5]; c[2] = J[0]*J[1] + J[2]*J[3] + J[4]*J[5]; den = c[0]*c[1] - c[2]*c[2]; det2 = d_0*d_0 + d_1*d_1 + d_2*d_2; det = std::sqrt(det2); } /// Compute Jacobian determinants for tetrahedron embedded in R^3 inline void compute_jacobian_determinants_tetrahedron_3d(double & det, double d[9], const double J[UFC_GDIM_3*UFC_TDIM_3]) { d[0*3 + 0] = J[4]*J[8] - J[5]*J[7]; d[0*3 + 1] = J[5]*J[6] - J[3]*J[8]; d[0*3 + 2] = J[3]*J[7] - J[4]*J[6]; d[1*3 + 0] = J[2]*J[7] - J[1]*J[8]; d[1*3 + 1] = J[0]*J[8] - J[2]*J[6]; 
d[1*3 + 2] = J[1]*J[6] - J[0]*J[7]; d[2*3 + 0] = J[1]*J[5] - J[2]*J[4]; d[2*3 + 1] = J[2]*J[3] - J[0]*J[5]; d[2*3 + 2] = J[0]*J[4] - J[1]*J[3]; det = J[0]*d[0*3 + 0] + J[3]*d[1*3 + 0] + J[6]*d[2*3 + 0]; } //--- NEW Computation of Jacobian inverses --- /// Compute Jacobian inverse K for interval embedded in R^1 inline void new_compute_jacobian_inverse_interval_1d(double K[UFC_TDIM_1*UFC_GDIM_1], double det) { K[0] = 1.0 / det; } /// Compute Jacobian (pseudo)inverse K for interval embedded in R^2 inline void new_compute_jacobian_inverse_interval_2d(double K[UFC_TDIM_1*UFC_GDIM_2], double det2, const double J[UFC_GDIM_2*UFC_TDIM_1]) { K[0] = J[0] / det2; K[1] = J[1] / det2; } /// Compute Jacobian (pseudo)inverse K for interval embedded in R^3 inline void new_compute_jacobian_inverse_interval_3d(double K[UFC_TDIM_1*UFC_GDIM_3], double det2, const double J[UFC_GDIM_3*UFC_TDIM_1]) { K[0] = J[0] / det2; K[1] = J[1] / det2; K[2] = J[2] / det2; } /// Compute Jacobian inverse K for triangle embedded in R^2 inline void new_compute_jacobian_inverse_triangle_2d(double K[UFC_TDIM_2*UFC_GDIM_2], double det, const double J[UFC_GDIM_2*UFC_TDIM_2]) { K[0] = J[3] / det; K[1] = -J[1] / det; K[2] = -J[2] / det; K[3] = J[0] / det; } /// Compute Jacobian (pseudo)inverse K for triangle embedded in R^3 inline void new_compute_jacobian_inverse_triangle_3d(double K[UFC_TDIM_2*UFC_GDIM_3], double den, const double c[3], const double J[UFC_GDIM_3*UFC_TDIM_2]) { K[0] = (J[0]*c[1] - J[1]*c[2]) / den; K[1] = (J[2]*c[1] - J[3]*c[2]) / den; K[2] = (J[4]*c[1] - J[5]*c[2]) / den; K[3] = (J[1]*c[0] - J[0]*c[2]) / den; K[4] = (J[3]*c[0] - J[2]*c[2]) / den; K[5] = (J[5]*c[0] - J[4]*c[2]) / den; } /// Compute Jacobian inverse K for tetrahedron embedded in R^3 inline void new_compute_jacobian_inverse_tetrahedron_3d(double K[UFC_TDIM_3*UFC_GDIM_3], double det, const double d[9]) { K[0] = d[0*3 + 0] / det; K[1] = d[1*3 + 0] / det; K[2] = d[2*3 + 0] / det; K[3] = d[0*3 + 1] / det; K[4] = d[1*3 + 1] / det; 
K[5] = d[2*3 + 1] / det; K[6] = d[0*3 + 2] / det; K[7] = d[1*3 + 2] / det; K[8] = d[2*3 + 2] / det; } // --- Computation of edge, face, facet scaling factors /// Compute edge scaling factors for triangle embedded in R^2 inline void compute_edge_scaling_factors_triangle_2d(double dx[2], const double vertex_coordinates[6], std::size_t facet) { // Get vertices on edge const unsigned int v0 = triangle_facet_vertices[facet][0]; const unsigned int v1 = triangle_facet_vertices[facet][1]; // Compute scale factor (length of edge scaled by length of reference interval) dx[0] = vertex_coordinates[2*v1 + 0] - vertex_coordinates[2*v0 + 0]; dx[1] = vertex_coordinates[2*v1 + 1] - vertex_coordinates[2*v0 + 1]; } /// Compute facet scaling factor for triangle embedded in R^2 inline void compute_facet_scaling_factor_triangle_2d(double & det, const double dx[2]) { det = std::sqrt(dx[0]*dx[0] + dx[1]*dx[1]); } /// Compute edge scaling factors for triangle embedded in R^3 inline void compute_edge_scaling_factors_triangle_3d(double dx[3], const double vertex_coordinates[9], std::size_t facet) { // Get vertices on edge const unsigned int v0 = triangle_facet_vertices[facet][0]; const unsigned int v1 = triangle_facet_vertices[facet][1]; // Compute scale factor (length of edge scaled by length of reference interval) dx[0] = vertex_coordinates[3*v1 + 0] - vertex_coordinates[3*v0 + 0]; dx[1] = vertex_coordinates[3*v1 + 1] - vertex_coordinates[3*v0 + 1]; dx[2] = vertex_coordinates[3*v1 + 2] - vertex_coordinates[3*v0 + 2]; } /// Compute facet scaling factor for triangle embedded in R^3 inline void compute_facet_scaling_factor_triangle_3d(double & det, const double dx[3]) { det = std::sqrt(dx[0]*dx[0] + dx[1]*dx[1] + dx[2]*dx[2]); } /// Compute face scaling factors for tetrahedron embedded in R^3 inline void compute_face_scaling_factors_tetrahedron_3d(double a[3], const double vertex_coordinates[12], std::size_t facet) { // Get vertices on face const unsigned int v0 = 
tetrahedron_facet_vertices[facet][0]; const unsigned int v1 = tetrahedron_facet_vertices[facet][1]; const unsigned int v2 = tetrahedron_facet_vertices[facet][2]; // Compute scale factor (area of face scaled by area of reference triangle) a[0] = (vertex_coordinates[3*v0 + 1]*vertex_coordinates[3*v1 + 2] + vertex_coordinates[3*v0 + 2]*vertex_coordinates[3*v2 + 1] + vertex_coordinates[3*v1 + 1]*vertex_coordinates[3*v2 + 2]) - (vertex_coordinates[3*v2 + 1]*vertex_coordinates[3*v1 + 2] + vertex_coordinates[3*v2 + 2]*vertex_coordinates[3*v0 + 1] + vertex_coordinates[3*v1 + 1]*vertex_coordinates[3*v0 + 2]); a[1] = (vertex_coordinates[3*v0 + 2]*vertex_coordinates[3*v1 + 0] + vertex_coordinates[3*v0 + 0]*vertex_coordinates[3*v2 + 2] + vertex_coordinates[3*v1 + 2]*vertex_coordinates[3*v2 + 0]) - (vertex_coordinates[3*v2 + 2]*vertex_coordinates[3*v1 + 0] + vertex_coordinates[3*v2 + 0]*vertex_coordinates[3*v0 + 2] + vertex_coordinates[3*v1 + 2]*vertex_coordinates[3*v0 + 0]); a[2] = (vertex_coordinates[3*v0 + 0]*vertex_coordinates[3*v1 + 1] + vertex_coordinates[3*v0 + 1]*vertex_coordinates[3*v2 + 0] + vertex_coordinates[3*v1 + 0]*vertex_coordinates[3*v2 + 1]) - (vertex_coordinates[3*v2 + 0]*vertex_coordinates[3*v1 + 1] + vertex_coordinates[3*v2 + 1]*vertex_coordinates[3*v0 + 0] + vertex_coordinates[3*v1 + 0]*vertex_coordinates[3*v0 + 1]); } /// Compute facet scaling factor for tetrahedron embedded in R^3 inline void compute_facet_scaling_factor_tetrahedron_3d(double & det, const double a[3]) { det = std::sqrt(a[0]*a[0] + a[1]*a[1] + a[2]*a[2]); } ///--- Compute facet normal directions --- /// Compute facet direction for interval embedded in R^1 inline void compute_facet_normal_direction_interval_1d(bool & direction, const double vertex_coordinates[2], std::size_t facet) { direction = facet == 0 ? 
vertex_coordinates[0] > vertex_coordinates[1] : vertex_coordinates[1] > vertex_coordinates[0]; } /// Compute facet direction for triangle embedded in R^2 inline void compute_facet_normal_direction_triangle_2d(bool & direction, const double vertex_coordinates[6], const double dx[2], std::size_t facet) { const unsigned int v0 = triangle_facet_vertices[facet][0]; direction = dx[1]*(vertex_coordinates[2*facet ] - vertex_coordinates[2*v0 ]) - dx[0]*(vertex_coordinates[2*facet + 1] - vertex_coordinates[2*v0 + 1]) < 0; } /// Compute facet direction for tetrahedron embedded in R^3 inline void compute_facet_normal_direction_tetrahedron_3d(bool & direction, const double vertex_coordinates[9], const double a[3], std::size_t facet) { const unsigned int v0 = tetrahedron_facet_vertices[facet][0]; direction = a[0]*(vertex_coordinates[3*facet ] - vertex_coordinates[3*v0 ]) + a[1]*(vertex_coordinates[3*facet + 1] - vertex_coordinates[3*v0 + 1]) + a[2]*(vertex_coordinates[3*facet + 2] - vertex_coordinates[3*v0 + 2]) < 0; } ///--- Compute facet normal vectors --- /// Compute facet normal for interval embedded in R^1 inline void compute_facet_normal_interval_1d(double n[UFC_GDIM_1], bool direction) { // Facet normals are 1.0 or -1.0: (-1.0) <-- X------X --> (1.0) n[0] = direction ? 
1.0 : -1.0; } /// Compute facet normal for interval embedded in R^2 inline void compute_facet_normal_interval_2d(double n[UFC_GDIM_2], const double vertex_coordinates[4], std::size_t facet) { if (facet == 0) { n[0] = vertex_coordinates[0] - vertex_coordinates[2]; n[1] = vertex_coordinates[1] - vertex_coordinates[3]; } else { n[0] = vertex_coordinates[2] - vertex_coordinates[0]; n[1] = vertex_coordinates[3] - vertex_coordinates[1]; } const double n_length = std::sqrt(n[0]*n[0] + n[1]*n[1]); n[0] /= n_length; n[1] /= n_length; } /// Compute facet normal for interval embedded in R^3 inline void compute_facet_normal_interval_3d(double n[UFC_GDIM_3], const double vertex_coordinates[6], std::size_t facet) { if (facet == 0) { n[0] = vertex_coordinates[0] - vertex_coordinates[3]; n[1] = vertex_coordinates[1] - vertex_coordinates[4]; n[1] = vertex_coordinates[2] - vertex_coordinates[5]; } else { n[0] = vertex_coordinates[3] - vertex_coordinates[0]; n[1] = vertex_coordinates[4] - vertex_coordinates[1]; n[1] = vertex_coordinates[5] - vertex_coordinates[2]; } const double n_length = std::sqrt(n[0]*n[0] + n[1]*n[1] + n[2]*n[2]); n[0] /= n_length; n[1] /= n_length; n[2] /= n_length; } /// Compute facet normal for triangle embedded in R^2 inline void compute_facet_normal_triangle_2d(double n[UFC_GDIM_2], const double dx[2], const double det, bool direction) { // Compute facet normals from the facet scale factor constants n[0] = direction ? dx[1] / det : -dx[1] / det; n[1] = direction ? 
-dx[0] / det : dx[0] / det; } /// Compute facet normal for triangle embedded in R^3 inline void compute_facet_normal_triangle_3d(double n[UFC_GDIM_3], const double vertex_coordinates[6], std::size_t facet) { // Compute facet normal for triangles in 3D const unsigned int vertex0 = facet; // Get coordinates corresponding the vertex opposite this const unsigned int vertex1 = triangle_facet_vertices[facet][0]; const unsigned int vertex2 = triangle_facet_vertices[facet][1]; // Define vectors n = (p2 - p0) and t = normalized (p2 - p1) n[0] = vertex_coordinates[3*vertex2 + 0] - vertex_coordinates[3*vertex0 + 0]; n[1] = vertex_coordinates[3*vertex2 + 1] - vertex_coordinates[3*vertex0 + 1]; n[2] = vertex_coordinates[3*vertex2 + 2] - vertex_coordinates[3*vertex0 + 2]; double t0 = vertex_coordinates[3*vertex2 + 0] - vertex_coordinates[3*vertex1 + 0]; double t1 = vertex_coordinates[3*vertex2 + 1] - vertex_coordinates[3*vertex1 + 1]; double t2 = vertex_coordinates[3*vertex2 + 2] - vertex_coordinates[3*vertex1 + 2]; const double t_length = std::sqrt(t0*t0 + t1*t1 + t2*t2); t0 /= t_length; t1 /= t_length; t2 /= t_length; // Subtract, the projection of (p2 - p0) onto (p2 - p1), from (p2 - p0) const double ndott = t0*n[0] + t1*n[1] + t2*n[2]; n[0] -= ndott*t0; n[1] -= ndott*t1; n[2] -= ndott*t2; const double n_length = std::sqrt(n[0]*n[0] + n[1]*n[1] + n[2]*n[2]); // Normalize n[0] /= n_length; n[1] /= n_length; n[2] /= n_length; } /// Compute facet normal for tetrahedron embedded in R^3 inline void compute_facet_normal_tetrahedron_3d(double n[UFC_GDIM_3], const double a[3], const double det, bool direction) { // Compute facet normals from the facet scale factor constants n[0] = direction ? a[0] / det : -a[0] / det; n[1] = direction ? a[1] / det : -a[1] / det; n[2] = direction ? 
a[2] / det : -a[2] / det; }

///--- Compute circumradius ---

/// Compute circumradius for interval embedded in R^1
inline void compute_circumradius_interval_1d(double & circumradius,
                                             double volume)
{
  // Compute circumradius; in 1D it is equal to half the cell length
  circumradius = volume / 2.0;
}

/// Compute circumradius for interval embedded in R^2
inline void compute_circumradius_interval_2d(double & circumradius,
                                             double volume)
{
  // Compute circumradius of interval in 2D (1/2 volume)
  circumradius = volume / 2.0;
}

/// Compute circumradius for interval embedded in R^3
inline void compute_circumradius_interval_3d(double & circumradius,
                                             double volume)
{
  // Compute circumradius of interval in 3D (1/2 volume)
  circumradius = volume / 2.0;
}

/// Compute circumradius for triangle embedded in R^2
inline void compute_circumradius_triangle_2d(double & circumradius,
                                             const double vertex_coordinates[6],
                                             const double J[UFC_GDIM_2*UFC_TDIM_2],
                                             double volume)
{
  // Compute circumradius of triangle in 2D via R = abc / (4*Area),
  // where a, b, c are the edge lengths and Area = volume.
  // Edge v1-v2 from explicit vertex coordinates:
  const double v1v2 = std::sqrt( (vertex_coordinates[4] - vertex_coordinates[2])*(vertex_coordinates[4] - vertex_coordinates[2]) + (vertex_coordinates[5] - vertex_coordinates[3])*(vertex_coordinates[5] - vertex_coordinates[3]) );
  // Edges incident to v0 read off the Jacobian columns (J maps the
  // reference edges v0->v1 and v0->v2).
  const double v0v2 = std::sqrt(J[3]*J[3] + J[1]*J[1]);
  const double v0v1 = std::sqrt(J[0]*J[0] + J[2]*J[2]);
  circumradius = 0.25*(v1v2*v0v2*v0v1) / volume;
}

/// Compute circumradius for triangle embedded in R^3
inline void compute_circumradius_triangle_3d(double & circumradius,
                                             const double vertex_coordinates[9],
                                             const double J[UFC_GDIM_3*UFC_TDIM_2],
                                             double volume)
{
  // Compute circumradius of triangle in 3D via R = abc / (4*Area);
  // same construction as the 2D case with 3-component vectors.
  const double v1v2 = std::sqrt( (vertex_coordinates[6] - vertex_coordinates[3])*(vertex_coordinates[6] - vertex_coordinates[3]) + (vertex_coordinates[7] - vertex_coordinates[4])*(vertex_coordinates[7] - vertex_coordinates[4]) + (vertex_coordinates[8] - vertex_coordinates[5])*(vertex_coordinates[8] - vertex_coordinates[5]));
  const double v0v2 = std::sqrt(
J[3]*J[3] + J[1]*J[1] + J[5]*J[5]);
  const double v0v1 = std::sqrt( J[0]*J[0] + J[2]*J[2] + J[4]*J[4]);
  circumradius = 0.25*(v1v2*v0v2*v0v1) / volume;
}

/// Compute circumradius for tetrahedron embedded in R^3
inline void compute_circumradius_tetrahedron_3d(double & circumradius,
                                                const double vertex_coordinates[12],
                                                const double J[UFC_GDIM_3*UFC_TDIM_3],
                                                double volume)
{
  // Compute circumradius using Crelle's formula:
  //   R = A / (6*V), with A = sqrt(s(s-la)(s-lb)(s-lc)), s = (la+lb+lc)/2,
  // where la, lb, lc are the pairwise products of opposite edge lengths.
  // Edge lengths between non-v0 vertices come from explicit coordinates;
  // edges incident to v0 are read off the Jacobian columns.
  const double v1v2 = std::sqrt( (vertex_coordinates[6] - vertex_coordinates[3])*(vertex_coordinates[6] - vertex_coordinates[3]) + (vertex_coordinates[7] - vertex_coordinates[4])*(vertex_coordinates[7] - vertex_coordinates[4]) + (vertex_coordinates[8] - vertex_coordinates[5])*(vertex_coordinates[8] - vertex_coordinates[5]) );
  const double v0v2 = std::sqrt(J[1]*J[1] + J[4]*J[4] + J[7]*J[7]);
  const double v0v1 = std::sqrt(J[0]*J[0] + J[3]*J[3] + J[6]*J[6]);
  const double v0v3 = std::sqrt(J[2]*J[2] + J[5]*J[5] + J[8]*J[8]);
  const double v1v3 = std::sqrt( (vertex_coordinates[ 9] - vertex_coordinates[3])*(vertex_coordinates[ 9] - vertex_coordinates[3]) + (vertex_coordinates[10] - vertex_coordinates[4])*(vertex_coordinates[10] - vertex_coordinates[4]) + (vertex_coordinates[11] - vertex_coordinates[5])*(vertex_coordinates[11] - vertex_coordinates[5]) );
  const double v2v3 = std::sqrt( (vertex_coordinates[ 9] - vertex_coordinates[6])*(vertex_coordinates[ 9] - vertex_coordinates[6]) + (vertex_coordinates[10] - vertex_coordinates[7])*(vertex_coordinates[10] - vertex_coordinates[7]) + (vertex_coordinates[11] - vertex_coordinates[8])*(vertex_coordinates[11] - vertex_coordinates[8]) );
  // Products of opposite edge pairs
  const double la = v1v2*v0v3;
  const double lb = v0v2*v1v3;
  const double lc = v0v1*v2v3;
  // Heron-like expression for the area of the "edge-product triangle"
  const double s = 0.5*(la+lb+lc);
  const double area = std::sqrt(s*(s-la)*(s-lb)*(s-lc));
  circumradius = area / (6.0*volume);
}

///--- Compute min facet edge lengths ---

/// Compute min edge length in facet of tetrahedron embedded in R^3
inline void compute_min_facet_edge_length_tetrahedron_3d(double & min_edge_length, unsigned
int facet, const double vertex_coordinates[3*4]) { // TODO: Extract compute_facet_edge_lengths_tetrahedron_3d(), reuse between min/max functions double edge_lengths_sqr[3]; for (unsigned int edge = 0; edge < 3; ++edge) { const unsigned int vertex0 = tetrahedron_facet_edge_vertices[facet][edge][0]; const unsigned int vertex1 = tetrahedron_facet_edge_vertices[facet][edge][1]; edge_lengths_sqr[edge] = (vertex_coordinates[3*vertex1 + 0] - vertex_coordinates[3*vertex0 + 0])*(vertex_coordinates[3*vertex1 + 0] - vertex_coordinates[3*vertex0 + 0]) + (vertex_coordinates[3*vertex1 + 1] - vertex_coordinates[3*vertex0 + 1])*(vertex_coordinates[3*vertex1 + 1] - vertex_coordinates[3*vertex0 + 1]) + (vertex_coordinates[3*vertex1 + 2] - vertex_coordinates[3*vertex0 + 2])*(vertex_coordinates[3*vertex1 + 2] - vertex_coordinates[3*vertex0 + 2]); } min_edge_length = std::sqrt(std::min(std::min(edge_lengths_sqr[1], edge_lengths_sqr[1]), edge_lengths_sqr[2])); } ///--- Compute max facet edge lengths --- /// Compute max edge length in facet of tetrahedron embedded in R^3 inline void compute_max_facet_edge_length_tetrahedron_3d(double & max_edge_length, unsigned int facet, const double vertex_coordinates[12]) { // TODO: Extract compute_facet_edge_lengths_tetrahedron_3d(), reuse between min/max functions double edge_lengths_sqr[3]; for (unsigned int edge = 0; edge < 3; ++edge) { const unsigned int vertex0 = tetrahedron_facet_edge_vertices[facet][edge][0]; const unsigned int vertex1 = tetrahedron_facet_edge_vertices[facet][edge][1]; edge_lengths_sqr[edge] = (vertex_coordinates[3*vertex1 + 0] - vertex_coordinates[3*vertex0 + 0])*(vertex_coordinates[3*vertex1 + 0] - vertex_coordinates[3*vertex0 + 0]) + (vertex_coordinates[3*vertex1 + 1] - vertex_coordinates[3*vertex0 + 1])*(vertex_coordinates[3*vertex1 + 1] - vertex_coordinates[3*vertex0 + 1]) + (vertex_coordinates[3*vertex1 + 2] - vertex_coordinates[3*vertex0 + 2])*(vertex_coordinates[3*vertex1 + 2] - vertex_coordinates[3*vertex0 + 2]); } 
max_edge_length = std::sqrt(std::max(std::max(edge_lengths_sqr[0], edge_lengths_sqr[1]), edge_lengths_sqr[2])); } //} // TODO: Wrap all in namespace ufc #endif ffc-1.6.0/ufc/ufc_shared_ptr_classes.i000066400000000000000000000005721255571034100177170ustar00rootroot00000000000000// Declare which classes should be stored using shared_ptr %shared_ptr(ufc::dofmap) %shared_ptr(ufc::finite_element) %shared_ptr(ufc::function) %shared_ptr(ufc::integral) %shared_ptr(ufc::cell_integral) %shared_ptr(ufc::exterior_facet_integral) %shared_ptr(ufc::interior_facet_integral) %shared_ptr(ufc::vertex_integral) %shared_ptr(ufc::custom_integral) %shared_ptr(ufc::form) ffc-1.6.0/ufc_benchmark/000077500000000000000000000000001255571034100150465ustar00rootroot00000000000000ffc-1.6.0/ufc_benchmark/Makefile000066400000000000000000000017441255571034100165140ustar00rootroot00000000000000# # An overly complicated makefile for compiling a swig module. # MODULENAME=ufc_benchmark CXX=g++ ufcinclude=-I../../../ufc # Python location and version PYTHONROOT:=$(shell python -c 'import sys; print sys.prefix') PYTHONVER:=$(shell python -c 'import sys; print sys.version[:3]') all: _$(MODULENAME).so echo Done. 
# link module _$(MODULENAME).so: $(MODULENAME)_wrap.o $(MODULENAME).o $(CXX) -shared -o _$(MODULENAME).so $(MODULENAME)_wrap.o $(MODULENAME).o # compile wrapper $(MODULENAME)_wrap.o: $(MODULENAME)_wrap.cxx $(CXX) -I$(PYTHONROOT)/include/python$(PYTHONVER) -o $@ -c $< # generate wrapper $(MODULENAME)_wrap.cxx: $(MODULENAME).i $(MODULENAME).h swig -c++ -python $(ufcinclude) $(MODULENAME).i # compile module code $(MODULENAME).o: *.h *.cpp $(CXX) -c -o $(MODULENAME).o $(MODULENAME).cpp clean: rm -f $(MODULENAME).o rm -f $(MODULENAME).py rm -f $(MODULENAME).pyc rm -f _$(MODULENAME).so rm -f $(MODULENAME)_wrap.cxx rm -f $(MODULENAME)_wrap.o rm -rf build ffc-1.6.0/ufc_benchmark/setup.py000066400000000000000000000016671255571034100165720ustar00rootroot00000000000000#!/usr/bin/env python from distutils.core import setup from distutils.core import Extension import os # the buggy swig-support in distutils doesn't manage to invoke g++, uses gcc... os.system("make ufc_benchmark_wrap.cxx") extension = Extension('_ufc_benchmark', ['ufc_benchmark.cpp', 'ufc_benchmark_wrap.cxx'], language="c++", include_dirs=["../../../ufc"]) setup(### metadata: name = 'ufc_benchmark', version = '1.1.2', author = 'Martin Sandve Alnes', author_email = 'martinal@simula.no', maintainer = 'Martin Sandve Alnes', maintainer_email = 'martinal@simula.no', url = 'http://www.fenicsproject.org', description = 'Benchmark utility for UFC implementations.', download_url = 'https://bitbucket/fenics-project/ufc', ### contents: py_modules = ['ufc_benchmark'], ext_modules = [extension], ) ffc-1.6.0/ufc_benchmark/ufc_benchmark.cpp000066400000000000000000000241531255571034100203460ustar00rootroot00000000000000// This is utility code for UFC (Unified Form-assembly Code) v. 1.6.0. // This code is released into the public domain. // // The FEniCS Project (http://www.fenicsproject.org/) 2006-2015. 
#include #include using std::cout; using std::endl; using std::vector; #include #define TMIN 3.0 #define MMIN 1000 #include "ufc_data.h" #include "ufc_reference_cell.h" #include "ufc_benchmark.h" clock_t __tic_time; void tic() { __tic_time = clock(); } double toc() { clock_t __toc_time = clock(); double elapsed_time = ((double) (__toc_time - __tic_time)) / CLOCKS_PER_SEC; return elapsed_time; } // Adaptive timing: make sure we run for at least TMIN to get reliable results double time_tabulate_tensor(ufc::cell_integral& integral, double *A, const double * const * w, const ufc::cell & c) { std::size_t M = MMIN; while ( true ) { tic(); for (std::size_t i = 0; i < M; i++) { integral.tabulate_tensor(A, w, c); } double t = toc(); if ( t >= TMIN ) return t / static_cast(M); M *= 10; cout << "Elapsed time too short, increasing number of iterations to" << M << endl; } return 0.0; } // Adaptive timing: make sure we run for at least TMIN to get reliable results double time_tabulate_tensor(ufc::exterior_facet_integral& integral, double *A, const double * const * w, const ufc::cell & c, std::size_t facet) { std::size_t M = MMIN; while ( true ) { tic(); for (std::size_t i = 0; i < M; i++) { integral.tabulate_tensor(A, w, c, facet); } double t = toc(); if ( t >= TMIN ) return t / static_cast(M); M *= 10; cout << "Elapsed time too short, increasing number of iterations to" << M << endl; } return 0.0; } // Benchmark all integrals of a form. 
vector< vector > benchmark(const ufc::form & form, bool print_tensor) { // construct and allocate some stuff ufc::ufc_data data(form); // create a reference cell geometry ufc::reference_cell c(data.elements[0]->cell_shape()); // data structures for times vector cell_times(form.num_cell_domains()); vector exterior_facet_times(form.num_exterior_facet_domains()); vector interior_facet_times(form.num_interior_facet_domains()); // benchmark all cell integrals for(unsigned i = 0; i < form.num_cell_domains(); i++) { cell_times[i] = time_tabulate_tensor(*data.cell_domains[i], data.A, data.w, c); if(print_tensor) { cout << "Cell element tensor " << i << ":" << endl; data.print_tensor(); cout << endl; } } // benchmark all exterior facet integrals for(unsigned i = 0; i < form.num_exterior_facet_domains(); i++) { std::size_t facet = 0; // TODO: would it be interesting to time all facets? exterior_facet_times[i] = time_tabulate_tensor(*data.exterior_facet_domains[i], data.A, data.w, c, facet); if(print_tensor) { cout << "Exterior facet element tensor " << i << ":" << endl; data.print_tensor(); cout << endl; } } // benchmark all interior facet integrals /* // TODO: If somebody needs this, please implement it! Need two cells, and larger A. for(unsigned i = 0; i < form.num_interior_facet_domains(); i++) { std::size_t facet = 0; // TODO: would it be interesting to time all facets? 
interior_facet_times[i] = time_tabulate_tensor(*data.interior_facet_domains[i], data.A, data.w, c, facet); if(print_tensor) { cout << "Interior facet element tensor " << i << ":" << endl; data.print_tensor(); cout << endl; } } */ vector< vector > result(3); result[0] = cell_times; result[1] = exterior_facet_times; result[2] = interior_facet_times; return result; } vector< vector > tabulate_cell_tensor(const ufc::form & form, vector< vector > w, int domain) { ufc::ufc_data data(form); // copy w to the appropriate array if(data.num_coefficients != w.size()) throw std::runtime_error("Wrong number of coefficients"); for(unsigned i=0; icell_shape()); // tabulate the tensor data.cell_integrals[domain]->tabulate_tensor(data.A, data.w, c); // copy element tensor to stl-structure for easy returning to python (should perhaps rather use numpy and some typemaps, but I'm lazy) vector< vector > A; if(data.rank == 2) { A.resize(data.dimensions[0]); for(unsigned i=0; i > tabulate_cell_integral(const std::shared_ptr form, std::vector< std::vector > w, ufc::cell cell, int domain) { ufc::ufc_data data(*form); // copy w to the appropriate array if(data.num_coefficients != w.size()) throw std::runtime_error("Wrong number of coefficients"); for(unsigned i=0; itabulate_tensor(data.A, data.w, cell); // copy element tensor to stl-structure for easy returning to python (should perhaps rather use numpy and some typemaps, but I'm lazy) vector< vector > A; if(data.rank == 2) { A.resize(data.dimensions[0]); for(unsigned i=0; i > tabulate_exterior_facet_integral(const std::shared_ptr form, std::vector< std::vector > w, ufc::cell& cell, int facet, int domain) { ufc::ufc_data data(*form); // copy w to the appropriate array if(data.num_coefficients != w.size()) throw std::runtime_error("Wrong number of coefficients"); for(unsigned i=0; itabulate_tensor(data.A, data.w, cell, facet); // copy element tensor to stl-structure for easy returning to python (should perhaps rather use numpy and some 
typemaps, but I'm lazy) vector< vector > A; if(data.rank == 2) { A.resize(data.dimensions[0]); for(unsigned i=0; i > tabulate_interior_facet_integral(const std::shared_ptr form, std::vector< std::vector > macro_w,\ ufc::cell& cell0, ufc::cell& cell1, int facet_0, int facet_1, int domain) { ufc::ufc_data data(*form); // copy w to the appropriate array if(data.num_coefficients != macro_w.size()) throw std::runtime_error("Wrong number of coefficients"); for(unsigned i=0; itabulate_tensor(data.macro_A, data.macro_w, cell0, cell1, facet_0, facet_1); // copy element tensor to stl-structure for easy returning to python (should perhaps rather use numpy and some typemaps, but I'm lazy) vector< vector > A; if(data.rank == 2) { A.resize(2*data.dimensions[0]); for(unsigned i=0; i<2*data.dimensions[0]; i++) { A[i].resize(2*data.dimensions[1]); for(unsigned j=0; j<2*data.dimensions[1]; j++) { A[i][j] = data.macro_A[i*2*data.dimensions[1] + j]; } } } else if(data.rank == 1) { A.resize(2*data.dimensions[0]); for(unsigned i=0; i<2*data.dimensions[0]; i++) { A[i].resize(1); A[i][0] = data.macro_A[i]; } } else if(data.rank == 0) { A.resize(1); A[0].resize(1); A[0][0] = data.macro_A[0]; } else { throw std::runtime_error("rank != 0,1,2 not implemented"); } return A; } ffc-1.6.0/ufc_benchmark/ufc_benchmark.h000066400000000000000000000046431255571034100200150ustar00rootroot00000000000000// This is utility code for UFC (Unified Form-assembly Code) v 1.6.0. // This code is released into the public domain. // // The FEniCS Project (http://www.fenicsproject.org/) 2006-2015. #ifndef __UFC_BENCHMARK_H__ #define __UFC_BENCHMARK_H__ #include "ufc.h" #include #include /* Benchmark time to run tabulate_tensor for all integrals in a form. * * Uses a reference cell and one-cell mesh, and sets all w_ij = 1.0. */ std::vector< std::vector > benchmark(const ufc::form & form, bool print_tensors); /* Compute one element tensor on the reference cell with the given coefficients. 
*/ std::vector< std::vector > tabulate_cell_tensor(const ufc::form & form, std::vector< std::vector > w, int domain); /* Compute one cell integral. */ std::vector< std::vector > tabulate_cell_integral(const std::shared_ptr form, std::vector< std::vector > w, ufc::cell cell, int domain); /* Compute one exterior facet integral. */ std::vector< std::vector > tabulate_exterior_facet_integral(const std::shared_ptr form, std::vector< std::vector > w, ufc::cell& cell, int facet, int domain); /* Compute one interior facet integral. */ std::vector< std::vector > tabulate_interior_facet_integral(const std::shared_ptr form, std::vector< std::vector > macro_w, ufc::cell& cell0, ufc::cell& cell1, int facet_0, int facet_1, int domain); #endif ffc-1.6.0/ufc_benchmark/ufc_benchmark.i000066400000000000000000000030151255571034100200060ustar00rootroot00000000000000%module ufc_benchmark // ------------------------ STL stuff %{ #include %} %include stl.i %include std_vector.i %include std_carray.i %template(vector_double) std::vector; %typedef std::vector vector_double; %template(vector_vector_double) std::vector< std::vector >; %typedef std::vector< std::vector > vector_vector_double; %template(vector_std_t) std::vector< std::size_t >; %typedef std::vector< std::size_t > vector_size_t; %template(vector_vector_size_t) std::vector< std::vector< std::size_t > >; %typedef std::vector< std::vector< std::size_t > > vector_vector_size_t; // ------------------------ UFC stuff %import ufc.i %{ #include "ufc.h" #include "ufc_benchmark.h" #include "ufc_reference_cell.h" %} %include "ufc.h" %include "ufc_benchmark.h" %include "ufc_reference_cell.h" // ----------------------- Reference to shared pointer utility %{ class NoDeleter { public: void operator()(ufc::form *) {} }; std::shared_ptr form_ptr(ufc::form * form) { return std::shared_ptr(form, NoDeleter()); } %} class NoDeleter { public: void operator()(ufc::form *) {} }; std::shared_ptr form_ptr(ufc::form * form) { return std::shared_ptr(form, 
NoDeleter()); } // ----------------------- Python wrapper for benchmark %pythoncode{ def benchmark_forms(forms, print_tensors): import gc gc.collect() times = [] for f in forms: res = benchmark(f, print_tensors) times.append(tuple(res)) return times } ffc-1.6.0/ufc_benchmark/ufc_data.h000066400000000000000000000112501255571034100167640ustar00rootroot00000000000000// This is utility code for UFC (Unified Form-assembly Code) v 1.6.0. // This code is released into the public domain. // // The FEniCS Project (http://www.fenicsproject.org/) 2006-2015. #ifndef __UFC_DATA_H__ #define __UFC_DATA_H__ #include #include #include #include namespace ufc { class ufc_data { public: ufc_data(const ufc::form & form): form(form) { // short forms of dimensions rank = form.rank(); num_coefficients = form.num_coefficients(); num_arguments = rank + num_coefficients; // construct all dofmaps and elements dofmaps.resize(num_arguments); elements.resize(num_arguments); dimensions = new unsigned[num_arguments]; for(unsigned i=0; imax_local_dimension(); if(dimensions[i] != elements[i]->space_dimension()) throw std::runtime_error("Mismatching dimensions between finite_elements and dofmaps!"); if(elements[0]->cell_shape() != elements[i]->cell_shape()) throw std::runtime_error("Mismatching cell shapes in elements!"); } // construct all integral objects cell_integrals.resize(form.num_cell_domains()); for(unsigned i=0; i dofmaps; vector< ufc::finite_element * > elements; vector< ufc::cell_integral *> cell_integrals; vector< ufc::exterior_facet_integral *> exterior_facet_integrals; vector< ufc::interior_facet_integral *> interior_facet_integrals; unsigned rank; unsigned num_coefficients; unsigned num_arguments; unsigned A_size; unsigned * dimensions; double * A; double * macro_A; double ** w; double ** macro_w; void print_tensor() { int dim0 = 1; int dim1 = 1; if(rank == 1) { dim1 = dimensions[0]; } if(rank == 2) { dim0 = dimensions[0]; dim1 = dimensions[1]; } cout << "[" << endl; int k=0; for(int 
ii=0; ii #include namespace ufc { /// Description of a reference cell, for debugging and testing UFC code. class reference_cell: public ufc::cell { public: /// Constructor reference_cell(ufc::shape s) { cell_shape = s; num_entities[0] = 0; num_entities[1] = 0; num_entities[2] = 0; num_entities[3] = 0; // Get topological dimension and number of entities in a cell of this type. switch(s) { case interval: topological_dimension = 1; num_entities[0] = 2; num_entities[1] = 1; break; case triangle: topological_dimension = 2; num_entities[0] = 3; num_entities[1] = 3; num_entities[2] = 1; break; case quadrilateral: topological_dimension = 2; num_entities[0] = 4; num_entities[1] = 4; num_entities[2] = 1; break; case tetrahedron: topological_dimension = 3; num_entities[0] = 4; num_entities[1] = 6; num_entities[2] = 4; num_entities[3] = 1; break; case hexahedron: topological_dimension = 3; num_entities[0] = 8; num_entities[1] = 12; num_entities[2] = 6; num_entities[3] = 1; break; default: throw std::runtime_error("Invalid shape."); } // Assume same geometric dimension. geometric_dimension = topological_dimension; // Fill global indices like we had a single-cell mesh. entity_indices = new std::size_t*[topological_dimension+1]; for(std::size_t i = 0; i <= topological_dimension; i++) { entity_indices[i] = new std::size_t[num_entities[i]]; for(std::size_t j = 0; j < num_entities[i]; j++) { entity_indices[i][j] = j; } } // Allocate an empty array of vertex coordinates. coordinates = new double*[num_entities[0]]; for(std::size_t i = 0; i < num_entities[0]; i++) { coordinates[i] = new double[geometric_dimension]; for(std::size_t j = 0; j < geometric_dimension; j++) { coordinates[i][j] = 0.0; } } // Fill coordinates with reference cell definition. 
switch(s) { case interval: coordinates[0][0] = 0.0; coordinates[1][0] = 1.0; break; case triangle: coordinates[0][0] = 0.0; coordinates[0][1] = 0.0; coordinates[1][0] = 1.0; coordinates[1][1] = 0.0; coordinates[2][0] = 0.0; coordinates[2][1] = 1.0; break; case quadrilateral: coordinates[0][0] = 0.0; coordinates[0][1] = 0.0; coordinates[1][0] = 1.0; coordinates[1][1] = 0.0; coordinates[2][0] = 1.0; coordinates[2][1] = 1.0; coordinates[3][0] = 0.0; coordinates[3][1] = 1.0; break; case tetrahedron: coordinates[0][0] = 0.0; coordinates[0][1] = 0.0; coordinates[0][2] = 0.0; coordinates[1][0] = 1.0; coordinates[1][1] = 0.0; coordinates[1][2] = 0.0; coordinates[2][0] = 0.0; coordinates[2][1] = 1.0; coordinates[2][2] = 0.0; coordinates[3][0] = 0.0; coordinates[3][1] = 0.0; coordinates[3][2] = 1.0; break; case hexahedron: coordinates[0][0] = 0.0; coordinates[0][1] = 0.0; coordinates[0][2] = 0.0; coordinates[1][0] = 1.0; coordinates[1][1] = 0.0; coordinates[1][2] = 0.0; coordinates[2][0] = 1.0; coordinates[2][1] = 1.0; coordinates[2][2] = 0.0; coordinates[3][0] = 0.0; coordinates[3][1] = 1.0; coordinates[3][2] = 0.0; coordinates[4][0] = 0.0; coordinates[4][1] = 0.0; coordinates[4][2] = 1.0; coordinates[5][0] = 1.0; coordinates[5][1] = 0.0; coordinates[5][2] = 1.0; coordinates[6][0] = 1.0; coordinates[6][1] = 1.0; coordinates[6][2] = 1.0; coordinates[7][0] = 0.0; coordinates[7][1] = 1.0; coordinates[7][2] = 1.0; break; } } /// Destructor virtual ~reference_cell() { for(std::size_t i = 0; i <= topological_dimension; i++) { delete [] entity_indices[i]; } delete [] entity_indices; for(std::size_t i = 0; i < num_entities[0]; i++) { delete [] coordinates[i]; } delete [] coordinates; } /// The number of entities of a particular dimension std::size_t num_entities[4]; }; /// Description of a reference cell, for debugging and testing UFC code. 
class Cell: public ufc::cell { public: /// Constructor Cell(std::size_t top, std::size_t geo, std::vector< std::vector > coords, std::vector< std::size_t> num_ents): ufc::cell(), num_entities(num_ents) { topological_dimension = top; geometric_dimension = geo; num_entities[0] = coords.size(); // Fill global indices // entity_indices = new std::size_t*[topological_dimension+1]; // for(std::size_t i = 0; i <= topological_dimension; i++) // { // entity_indices[i] = new std::size_t[num_entities[i]]; // for(std::size_t j = 0; j < num_entities[i]; j++) // { // entity_indices[i][j] = j; // } // } for(std::size_t i = 0; i < num_ents.size(); i++) num_entities[i] = num_ents[i]; // Allocate an empty array of vertex coordinates. coordinates = new double*[coords.size()]; for(std::size_t i = 0; i < coords.size(); i++) { coordinates[i] = new double[geometric_dimension]; for(std::size_t j = 0; j < geometric_dimension; j++) { coordinates[i][j] = coords[i][j]; } } } /// Destructor virtual ~Cell() { // for(std::size_t i = 0; i <= topological_dimension; i++) // { // delete [] entity_indices[i]; // } // delete [] entity_indices; for(std::size_t i = 0; i < num_entities[0]; i++) { delete [] coordinates[i]; } delete [] coordinates; } /// The number of entities of a particular dimension std::vector num_entities; }; /// Consistent data for a mesh consisting of a single reference cell, for debugging and testing UFC code. class reference_mesh: public ufc::mesh { public: /// Constructor reference_mesh(ufc::shape s): c(s) { topological_dimension = c.topological_dimension; geometric_dimension = c.geometric_dimension; // Set global number of entities of each topological dimension to that of a single cell. num_entities = new std::size_t[topological_dimension+1]; for(std::size_t i = 0; i <= topological_dimension; i++) { num_entities[i] = c.num_entities[i]; } } /// Destructor virtual ~reference_mesh() { delete [] num_entities; } /// A reference cell, the only cell in this mesh. 
reference_cell c; }; /// Consistent data for a mesh consisting of a single reference cell, for debugging and testing UFC code. class Mesh: public ufc::mesh { public: /// Constructor Mesh(std::size_t top, std::size_t geo, std::vector ents)//: ufc::mesh() { topological_dimension = top; geometric_dimension = geo; // Set global number of entities of each topological dimension to that of a single cell. num_entities = new std::size_t[topological_dimension+1]; for(std::size_t i = 0; i <= topological_dimension; i++) { num_entities[i] = ents[i]; } } /// Destructor virtual ~Mesh() { delete [] num_entities; } }; } #endif