ffcx-0.7.0 source tree (git archive of commit 829854a630622b1a12fd87822987a406a05aa612)

ffcx-0.7.0/.github/FUNDING.yml
------------------------------

# These are supported funding model platforms

github: FEniCS  # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]

ffcx-0.7.0/.github/workflows/build-wheels.yml
---------------------------------------------

name: Build wheels

# By default this action does not push to test or production PyPI. The wheels
# are available as an artifact that can be downloaded and tested locally.

on:
  workflow_dispatch:
    inputs:
      ffcx_ref:
        description: "FFCx git ref to checkout"
        default: "main"
        type: string
      test_pypi_publish:
        description: "Publish to Test PyPi (true | false)"
        default: false
        type: boolean
      pypi_publish:
        description: "Publish to PyPi (true | false)"
        default: false
        type: boolean
  workflow_call:
    inputs:
      ffcx_ref:
        description: "FFCx git ref to checkout"
        default: "main"
        type: string
      test_pypi_publish:
        description: "Publish to Test PyPi (true | false)"
        default: false
        type: boolean
      pypi_publish:
        description: "Publish to PyPi (true | false)"
        default: false
        type: boolean

jobs:
  build:
    name: Build wheels and source distributions
    runs-on: ubuntu-latest
    steps:
      - name: Checkout FFCx
        uses: actions/checkout@v3
        with:
          ref: ${{ github.event.inputs.ffcx_ref }}
      - name: Upgrade pip and setuptools
        run: python -m pip install setuptools pip build --upgrade
      - name: Build sdist and wheel
        run: python -m build .
      - uses: actions/upload-artifact@v2
        with:
          path: dist/*

  upload_pypi:
    name: Upload to PyPI (optional)
    needs: [build]
    runs-on: ubuntu-latest
    steps:
      - uses: actions/download-artifact@v3
        with:
          name: artifact
          path: dist
      - name: Publish to PyPI
        uses: pypa/gh-action-pypi-publish@v1.5.1
        if: ${{ github.event.inputs.pypi_publish == 'true' }}
        with:
          user: __token__
          password: ${{ secrets.PYPI_TOKEN }}
          repository_url: https://upload.pypi.org/legacy/
      - name: Publish to Test PyPI
        uses: pypa/gh-action-pypi-publish@v1.5.1
        if: ${{ github.event.inputs.test_pypi_publish == 'true' }}
        with:
          user: __token__
          password: ${{ secrets.PYPI_TEST_TOKEN }}
          repository_url: https://test.pypi.org/legacy/
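Because the wheel build only runs on workflow_dispatch (or workflow_call), it must be triggered explicitly. The following is a minimal sketch (not part of the repository) of dispatching it through GitHub's REST API from Python; the GITHUB_TOKEN environment variable is an assumption for illustration and must hold a token with workflow permissions::

    # Trigger the build-wheels workflow via the GitHub REST API.
    import os

    import requests

    url = ("https://api.github.com/repos/FEniCS/ffcx"
           "/actions/workflows/build-wheels.yml/dispatches")
    response = requests.post(
        url,
        headers={
            "Accept": "application/vnd.github+json",
            # Token with workflow scope; an assumption for this sketch.
            "Authorization": f"Bearer {os.environ['GITHUB_TOKEN']}",
        },
        # Inputs mirror the workflow_dispatch inputs defined above;
        # the REST API expects all input values as strings.
        json={
            "ref": "main",
            "inputs": {
                "ffcx_ref": "main",
                "test_pypi_publish": "false",
                "pypi_publish": "false",
            },
        },
    )
    response.raise_for_status()  # GitHub returns 204 No Content on success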
ffcx-0.7.0/.github/workflows/dolfin-tests.yml
---------------------------------------------

# This workflow will install Basix, FFCx, DOLFINx and run the DOLFINx unit tests.

name: DOLFINx integration

on:
  pull_request:
    branches:
      - main
  workflow_dispatch:
    inputs:
      dolfinx_ref:
        description: "DOLFINx branch or tag"
        default: "main"
        type: string
      basix_ref:
        description: "Basix branch or tag"
        default: "main"
        type: string
      ufl_ref:
        description: "UFL branch or tag"
        default: "main"
        type: string

jobs:
  build:
    name: Run DOLFINx tests
    runs-on: ubuntu-latest
    container: fenicsproject/test-env:nightly-openmpi
    env:
      PETSC_ARCH: linux-gnu-complex-32
      OMPI_ALLOW_RUN_AS_ROOT: 1
      OMPI_ALLOW_RUN_AS_ROOT_CONFIRM: 1
      OMPI_MCA_rmaps_base_oversubscribe: 1
      OMPI_MCA_plm: isolated
      OMPI_MCA_btl_vader_single_copy_mechanism: none
      OMPI_MCA_mpi_yield_when_idle: 1
      OMPI_MCA_hwloc_base_binding_policy: none
    steps:
      - uses: actions/checkout@v3
      - name: Install dependencies (Python)
        run: |
          python3 -m pip install --upgrade pip
      - name: Install UFL and Basix (default branches/tags)
        if: github.event_name != 'workflow_dispatch'
        run: |
          python3 -m pip install git+https://github.com/FEniCS/ufl.git
          python3 -m pip install git+https://github.com/FEniCS/basix.git
      - name: Install UFL and Basix (specified branches/tags)
        if: github.event_name == 'workflow_dispatch'
        run: |
          python3 -m pip install git+https://github.com/FEniCS/ufl.git@${{ github.event.inputs.ufl_ref }}
          python3 -m pip install git+https://github.com/FEniCS/basix.git@${{ github.event.inputs.basix_ref }}
      - name: Install FFCx
        run: |
          pip3 install .
      - name: Get DOLFINx source (default branch/tag)
        if: github.event_name != 'workflow_dispatch'
        uses: actions/checkout@v3
        with:
          path: ./dolfinx
          repository: FEniCS/dolfinx
          ref: main
      - name: Get DOLFINx source (specified branch/tag)
        if: github.event_name == 'workflow_dispatch'
        uses: actions/checkout@v3
        with:
          path: ./dolfinx
          repository: FEniCS/dolfinx
          ref: ${{ github.event.inputs.dolfinx_ref }}
      - name: Install DOLFINx
        run: |
          cmake -G Ninja -DCMAKE_BUILD_TYPE=Developer -B build -S dolfinx/cpp/
          cmake --build build
          cmake --install build
          pip3 -v install --global-option build --global-option --debug dolfinx/python/
      - name: Build DOLFINx C++ unit tests
        run: |
          cmake -G Ninja -DCMAKE_BUILD_TYPE=Developer -B build/test/ -S dolfinx/cpp/test/
          cmake --build build/test
      - name: Run DOLFINx C++ unit tests
        run: |
          cd build/test
          ctest -V --output-on-failure -R unittests
      - name: Run DOLFINx Python unit tests
        run: python3 -m pytest -n auto dolfinx/python/test/unit
      - name: Run DOLFINx Python demos
        run: python3 -m pytest -n=2 -m serial dolfinx/python/demo/test.py
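The final two test steps of this workflow can be reproduced locally once DOLFINx is installed. A small sketch (not part of the repository) using pytest's Python entry point; the paths assume the ./dolfinx checkout layout used in the workflow, and pytest-xdist provides the -n option::

    # Run the same DOLFINx Python test suites the CI workflow runs.
    # Requires pytest and pytest-xdist to be installed.
    import sys

    import pytest

    # Unit tests, parallelised across all available cores (-n auto).
    rc = pytest.main(["-n", "auto", "dolfinx/python/test/unit"])
    if rc != 0:
        sys.exit(rc)

    # Demo tests marked "serial", two workers as in the workflow.
    sys.exit(pytest.main(["-n", "2", "-m", "serial", "dolfinx/python/demo/test.py"]))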
ffcx-0.7.0/.github/workflows/pythonapp.yml
------------------------------------------

# This workflow will install Python dependencies, run tests and lint
# with a single version of Python. For more information see:
# https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions

name: FFCx CI

on:
  push:
    branches:
      - "**"
    tags:
      - "v*"
  pull_request:
    branches:
      - main
  workflow_dispatch:

jobs:
  build:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest]
        python-version: ['3.8', '3.9', '3.10', '3.11']
    env:
      CC: gcc-10
      CXX: g++-10
    steps:
      - name: Checkout FFCx
        uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies (non-Python, Linux)
        if: runner.os == 'Linux'
        run: |
          sudo apt-get install -y graphviz libgraphviz-dev ninja-build pkg-config
      - name: Install dependencies (non-Python, macOS)
        if: runner.os == 'macOS'
        run: brew install graphviz ninja pkg-config
      - name: Install FEniCS dependencies (Python)
        run: |
          python -m pip install git+https://github.com/FEniCS/ufl.git@release
          python -m pip install git+https://github.com/FEniCS/basix.git@release
      - name: Install FFCx
        run: |
          python -m pip install .[ci]
      - name: Lint with flake8
        run: |
          python -m flake8 --statistics ffcx/ test/
      - name: Static check with mypy
        run: |
          python -m mypy ffcx/
        if: matrix.python-version != '3.11'
      - name: isort checks (non-blocking)
        continue-on-error: true
        run: |
          python3 -m isort --check .
      - name: Check documentation style
        run: |
          python -m pydocstyle .
      - name: Run unit tests
        run: |
          python -m pytest -n auto --cov=ffcx/ --junitxml=junit/test-results-${{ matrix.os }}-${{ matrix.python-version }}.xml test/
      - name: Upload to Coveralls
        if: ${{ github.repository == 'FEniCS/ffcx' && github.head_ref == '' && matrix.os == 'ubuntu-latest' && matrix.python-version == '3.8' }}
        env:
          COVERALLS_REPO_TOKEN: ${{ secrets.COVERALLS_REPO_TOKEN }}
        run: |
          python -m coveralls
        continue-on-error: true
      - name: Upload pytest results
        uses: actions/upload-artifact@v3
        with:
          name: pytest-results-${{ matrix.os }}-${{ matrix.python-version }}
          path: junit/test-results-${{ matrix.os }}-${{ matrix.python-version }}.xml
        # Use always() to always run this step to publish test results
        # when there are test failures
        if: always()
      - name: Get UFL
        uses: actions/checkout@v3
        with:
          path: ./ufl
          repository: FEniCS/ufl
          ref: main
      - name: Run FFCx and UFL demos
        run: |
          mv ufl/demo/* demo/
          pytest demo/test_demos.py
          rm -Rf ufl/
      - name: Build documentation
        run: |
          cd doc
          make html
      - name: Upload documentation artifact
        uses: actions/upload-artifact@v3
        with:
          name: doc-${{ matrix.os }}-${{ matrix.python-version }}
          path: doc/build/html/
          retention-days: 2
          if-no-files-found: error
      - name: Checkout FEniCS/docs
        if: ${{ github.repository == 'FEniCS/ffcx' && ( github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/') ) && runner.os == 'Linux' && matrix.python-version == 3.8 }}
        uses: actions/checkout@v3
        with:
          repository: "FEniCS/docs"
          path: "docs"
          ssh-key: "${{ secrets.SSH_GITHUB_DOCS_PRIVATE_KEY }}"
      - name: Set version name
        if: ${{ github.repository == 'FEniCS/ffcx' && ( github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/') ) && runner.os == 'Linux' && matrix.python-version == 3.8 }}
        run: |
          echo "VERSION_NAME=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
      - name: Copy documentation into repository
        if: ${{ github.repository == 'FEniCS/ffcx' && ( github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/') ) && runner.os == 'Linux' && matrix.python-version == 3.8 }}
        run: |
          cd docs
          git rm -r --ignore-unmatch ffcx/${{ env.VERSION_NAME }}
          mkdir -p ffcx/${{ env.VERSION_NAME }}
          cp -r ../doc/build/html/* ffcx/${{ env.VERSION_NAME }}
      - name: Commit and push documentation to FEniCS/docs
        if: ${{ github.repository == 'FEniCS/ffcx' && ( github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/') ) && runner.os == 'Linux' && matrix.python-version == 3.8 }}
        run: |
          cd docs
          git config --global user.email "fenics@github.com"
          git config --global user.name "FEniCS GitHub Actions"
          git add --all
          git commit --allow-empty -m "Python FEniCS/ffcx@${{ github.sha }}"
          git push

ffcx-0.7.0/.isort.cfg
--------------------

[settings]
src_paths = ffcx,test,demo
known_first_party = basix,ufl
known_third_party = numpy,pytest
sections = FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER
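The isort configuration above (used by the non-blocking "isort checks" CI step) can also be exercised programmatically. A minimal sketch (not part of the repository), assuming isort >= 5 and that it is run from the repository root so the settings file is discovered::

    # Check and apply the repository's import-sorting rules from Python.
    import isort

    messy = "import numpy\nimport os\nimport ufl\n"

    # isort.code() returns the input with imports sorted according to the
    # settings discovered in .isort.cfg (stdlib, third-party, first-party).
    print(isort.code(messy))

    # isort.check_code() only reports whether the input is already sorted.
    print(isort.check_code(messy, show_diff=True))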
ffcx-0.7.0/AUTHORS
-----------------

Credits for FFC
===============

Main authors:

  Anders Logg
  email: logg@simula.no
  www: http://home.simula.no/~logg/

  Kristian B. Ølgaard
  email: k.b.oelgaard@gmail.com

  Marie Rognes
  email: meg@simula.no

Main contributors:

  Garth N. Wells
  email: gnw20@cam.ac.uk
  www: http://www.eng.cam.ac.uk/~gnw20/

  Martin Sandve Alnæs
  email: martinal@simula.no

Contributors:

  Jan Blechta
  email: blechta@karlin.mff.cuni.cz

  Peter Brune
  email: brune@uchicago.edu

  Joachim B Haga
  email: jobh@broadpark.no

  Johan Jansson
  email: johanjan@math.chalmers.se
  www: http://www.math.chalmers.se/~johanjan/

  Robert C. Kirby
  email: kirby@cs.uchicago.edu
  www: http://people.cs.uchicago.edu/~kirby/

  Matthew G. Knepley
  email: knepley@mcs.anl.gov
  www: http://www-unix.mcs.anl.gov/~knepley/

  Dag Lindbo
  email: dag@f.kth.se
  www: http://www.f.kth.se/~dag/

  Ola Skavhaug
  email: skavhaug@simula.no
  www: http://home.simula.no/~skavhaug/

  Andy R. Terrel
  email: aterrel@uchicago.edu
  www: http://people.cs.uchicago.edu/~aterrel/

  Ivan Yashchuk
  email: ivan.yashchuk@aalto.fi

Credits for UFC
===============

UFC was merged into FFC 2014-02-18. Below is the list of credits for UFC at the time of the merge.

Main authors:

  Martin Sandve Alnaes
  Anders Logg
  Kent-Andre Mardal
  Hans Petter Langtangen

Main contributors:

  Asmund Odegard
  Kristian Oelgaard
  Johan Hake
  Garth N. Wells
  Marie E. Rognes
  Johannes Ring

Credits for UFLACS
==================

UFLACS was merged into FFC 2016-02-16.

Author:

  Martin Sandve Alnæs

Contributors:

  Anders Logg
  Garth N. Wells
  Johannes Ring
  Matthias Liertzer
  Steffen Müthing
ffcx-0.7.0/CODE_OF_CONDUCT.md
----------------------------

Code of Conduct
===============

Our Pledge
----------

In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation.

Our Standards
-------------

Examples of behavior that contributes to creating a positive environment include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting

Our Responsibilities
--------------------

Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.

Scope
-----

This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.

Enforcement
-----------

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at fenics-steering-council@googlegroups.com. Alternatively, you may report individually to one of the members of the Steering Council. Complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.

If you feel that your report has not been followed up satisfactorily, then you may contact our parent organisation NumFOCUS at info@numfocus.org for further redress.

Attribution
-----------

This Code of Conduct is adapted from the Contributor Covenant, version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html.

Adaptations
-----------

* Allow reporting to individual Steering Council members
* Added the option to contact NumFOCUS for further redress.

For answers to common questions about this code of conduct, see https://www.contributor-covenant.org/faq

ffcx-0.7.0/COPYING
------------------

                    GNU GENERAL PUBLIC LICENSE
                       Version 3, 29 June 2007

Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>

Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.

                            Preamble

The GNU General Public License is a free, copyleft license for software and other kinds of works.

The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too.

When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things.

To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. 
An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. 
You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. 
A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. 
For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. 
Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. 
The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. 
It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found.

    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) <year>  <name of author>

    This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.

    This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode:

    <program>  Copyright (C) <year>  <name of author>
    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
    This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box".

You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see <https://www.gnu.org/licenses/>.

The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read <https://www.gnu.org/licenses/why-not-lgpl.html>.

ffcx-0.7.0/COPYING.LESSER
-------------------------

                   GNU LESSER GENERAL PUBLIC LICENSE
                       Version 3, 29 June 2007

Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>

Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.

This version of the GNU Lesser General Public License incorporates the terms and conditions of version 3 of the GNU General Public License, supplemented by the additional permissions listed below.

0. Definitions.

As used herein, "this License" refers to version 3 of the GNU Lesser General Public License, and the "GNU GPL" refers to version 3 of the GNU General Public License.

"The Library" refers to a covered work governed by this License, other than an Application or a Combined Work as defined below.

An "Application" is any work that makes use of an interface provided by the Library, but which is not otherwise based on the Library. Defining a subclass of a class defined by the Library is deemed a mode of using an interface provided by the Library.

A "Combined Work" is a work produced by combining or linking an Application with the Library. The particular version of the Library with which the Combined Work was made is also called the "Linked Version".
The "Minimal Corresponding Source" for a Combined Work means the Corresponding Source for the Combined Work, excluding any source code for portions of the Combined Work that, considered in isolation, are based on the Application, and not on the Linked Version. The "Corresponding Application Code" for a Combined Work means the object code and/or source code for the Application, including any data and utility programs needed for reproducing the Combined Work from the Application, but excluding the System Libraries of the Combined Work. 1. Exception to Section 3 of the GNU GPL. You may convey a covered work under sections 3 and 4 of this License without being bound by section 3 of the GNU GPL. 2. Conveying Modified Versions. If you modify a copy of the Library, and, in your modifications, a facility refers to a function or data to be supplied by an Application that uses the facility (other than as an argument passed when the facility is invoked), then you may convey a copy of the modified version: a) under this License, provided that you make a good faith effort to ensure that, in the event an Application does not supply the function or data, the facility still operates, and performs whatever part of its purpose remains meaningful, or b) under the GNU GPL, with none of the additional permissions of this License applicable to that copy. 3. Object Code Incorporating Material from Library Header Files. The object code form of an Application may incorporate material from a header file that is part of the Library. You may convey such object code under terms of your choice, provided that, if the incorporated material is not limited to numerical parameters, data structure layouts and accessors, or small macros, inline functions and templates (ten or fewer lines in length), you do both of the following: a) Give prominent notice with each copy of the object code that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the object code with a copy of the GNU GPL and this license document. 4. Combined Works. You may convey a Combined Work under terms of your choice that, taken together, effectively do not restrict modification of the portions of the Library contained in the Combined Work and reverse engineering for debugging such modifications, if you also do each of the following: a) Give prominent notice with each copy of the Combined Work that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the Combined Work with a copy of the GNU GPL and this license document. c) For a Combined Work that displays copyright notices during execution, include the copyright notice for the Library among these notices, as well as a reference directing the user to the copies of the GNU GPL and this license document. d) Do one of the following: 0) Convey the Minimal Corresponding Source under the terms of this License, and the Corresponding Application Code in a form suitable for, and under terms that permit, the user to recombine or relink the Application with a modified version of the Linked Version to produce a modified Combined Work, in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source. 1) Use a suitable shared library mechanism for linking with the Library. 
A suitable mechanism is one that (a) uses at run time a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked Version. e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the GNU GPL, and only to the extent that such information is necessary to install and execute a modified version of the Combined Work produced by recombining or relinking the Application with a modified version of the Linked Version. (If you use option 4d0, the Installation Information must accompany the Minimal Corresponding Source and Corresponding Application Code. If you use option 4d1, you must provide the Installation Information in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.) 5. Combined Libraries. You may place library facilities that are a work based on the Library side by side in a single library together with other library facilities that are not Applications and are not covered by this License, and convey such a combined library under terms of your choice, if you do both of the following: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities, conveyed under the terms of this License. b) Give prominent notice with the combined library that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 6. Revised Versions of the GNU Lesser General Public License. The Free Software Foundation may publish revised and/or new versions of the GNU Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library as you received it specifies that a certain numbered version of the GNU Lesser General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that published version or of any later version published by the Free Software Foundation. If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation. If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library. 
ffcx-0.7.0/ChangeLog.rst
------------------------

Changelog
=========

0.6.0
-----

See https://github.com/FEniCS/ffcx/compare/v0.5.0...v0.6.0

0.5.0
-----

See: https://github.com/FEniCS/ffcx/compare/v0.4.0...v0.5.0

0.4.0
-----

See: https://github.com/FEniCS/ffcx/compare/v0.3.0...v0.4.0

0.3.0
-----

See: https://github.com/FEniCS/ffcx/compare/v0.2.0...v0.3.0

0.2.0
-----

- No changes

0.1.0
-----

Alpha release of ffcx

2018.2.0.dev0
-------------

- No changes

2018.1.0.dev0 (no release)
--------------------------

- Forked FFCx

2017.2.0 (2017-12-05)
---------------------

- Some fixes for ufc::eval for esoteric element combinations
- Reimplement code generation for all ufc classes with new class
  ufc::coordinate_mapping which can map between coordinates, compute
  jacobians, etc. for a coordinate mapping parameterized by a specific
  finite element.
- New functions in ufc::finite_element:

  - evaluate_reference_basis
  - evaluate_reference_basis_derivatives
  - transform_reference_basis_derivatives
  - tabulate_reference_dof_coordinates

- New functions in ufc::dofmap:

  - num_global_support_dofs
  - num_element_support_dofs

- Improved docstrings for parts of ufc.h
- FFC now accepts Q and DQ finite element families defined on
  quadrilaterals and hexahedrons
- Some fixes for ufc_geometry.h for quadrilateral and hexahedron cells

2017.1.0.post2 (2017-09-12)
---------------------------

- Change PyPI package name to fenics-ffc.

2017.1.0 (2017-05-09)
---------------------

- Let ffc -O parameter take an optional integer level like -O2, -O0
- Implement blockwise optimizations in uflacs code generation
- Expose uflacs optimization parameters through parameter system

2016.2.0 (2016-11-30)
---------------------

- Jit compiler now compiles elements separately from forms to avoid duplicate work
- Add parameter max_signature_length to optionally shorten signatures in the jit cache
- Move uflacs module into ffc.uflacs
- Remove installation of pkg-config and CMake files (UFC path and
  compiler flags are available from ffc module)
- Add dependency on dijitso and remove dependency on instant
- Add experimental Bitbucket pipelines
- Tidy the repo after UFC and UFLACS merge, and general spring cleanup.
  This includes removal of instructions how to merge two repos, commit
  hash c8389032268041fe94682790cb773663bdf27286.

2016.1.0 (2016-06-23)
---------------------

- Add function get_ufc_include to get path to ufc.h
- Merge UFLACS into FFC
- Generalize ufc interface to non-affine parameterized coordinates
- Add ufc::coordinate_mapping class
- Make ufc interface depend on C++11 features requiring gcc version >= 4.8
- Add function ufc_signature() to the form compiler interface
- Add function git_commit_hash()

1.6.0 (2015-07-28)
------------------

- Rename and modify a number of UFC interface functions. See
  docstrings in ufc.h for details.
- Bump required SWIG version to 3.0.3 - Disable dual basis (tabulate_coordinates and evaluate_dofs) for enriched elements until a correct implementation is available 1.5.0 (2015-01-12) ------------------ - Remove FErari support - Add support for new integral type custom_integral - Support for new form compiler backend "uflacs", downloaded separately 1.4.0 (2014-06-02) ------------------ - Add support for integrals that know which coefficients they use - Many bug fixes for facet integrals over manifolds - Merge UFC into FFC; ChangeLog for UFC appended below - Various updates mirroring UFL changes - Experimental: New custom integral with user defined quadrature points 1.3.0 (2014-01-07) ------------------ - Fix bug with runtime check of SWIG version - Move DOLFIN wrappers here from DOLFIN - Add support for new UFL operators cell_avg and facet_avg - Add new reference data handling system, now data is kept in an external repository - Fix bugs with ignoring quadrature rule arguments - Use cpp optimization by default in jit compiler 1.2.0 (2013-03-24) ------------------ - New feature: Add basic support for point integrals on vertices - New feature: Add general support for m-dimensional cells in n-dimensional space (n >= m, n, m = 1, 2, 3) 1.1.0 (2013-01-07) ------------------ - Fix bug for Conditionals related to DG constant Coefficients. Bug #1082048. - Fix bug for Conditionals, precedence rules for And and Or. Bug #1075149. - Changed data structure from list to deque when pop(0) operation is needed, speeding up split_expression operation considerably - Other minor fixes 1.0.0 (2011-12-07) ------------------ - Issue warning when form integration requires more than 100 points 1.0-rc1 (2011-11-28) -------------------- - Fix bug with coordinates on facet integrals (intervals). Bug #888682. - Add support for FacetArea, new geometric quantity in UFL. - Fix bug in optimised quadrature code, AlgebraOperators demo. Bug #890859. - Fix bug with undeclared variables in optimised quadrature code. Bug #883202. 1.0-beta2 (2011-10-11) ---------------------- - Added support for bessel functions, bessel_* (I,J,K,Y), in UFL. - Added support for error function, erf(), new math function in UFL. - Fix dof map 'need_entities' for Real spaces - Improve performance for basis function computation 1.0-beta (2011-08-11) --------------------- - Improve formatting of floats with up to one non-zero decimal place. - Fix bug involving zeros in products and sums. Bug #804160. - Fix bug for new conditions '&&', '||' and '!' in UFL. Bug #802560. - Fix bug involving VectorElement with dim=1. Bug #798578. - Fix bug with mixed element of symmetric tensor elements. Bug #745646.
- Fix bug when using geometric coordinates with one quadrature point 0.9.10 (2011-05-16) ------------------- - Change license from GPL v3 or later to LGPL v3 or later - Add some schemes for low-order simplices - Request quadrature schemes by polynomial degree (no longer by number of points in each direction) - Get quadrature schemes via ffc.quadrature_schemes - Improved lock handling in JIT compiler - Include common_cell in form signature - Add possibility to set swig binary and swig path 0.9.9 (2011-02-23) ------------------ - Add support for generating error control forms with option -e - Updates for UFC 2.0 - Set minimal degree to 1 in automatic degree selection for expressions - Add command-line option -f no_ferari - Add support for plotting of elements - Add utility function compute_tensor_representation 0.9.4 (2010-09-01) ------------------ - Added memory cache in jit(), for preprocessed forms - Added support for Conditional and added demo/Conditional.ufl. - Added support for new geometric quantity Circumradius in UFL. - Added support for new geometric quantity CellVolume in UFL. 0.9.3 (2010-07-01) ------------------ - Make global_dimension for Real return an int instead of double, bug #592088 - Add support for facet normal in 1D. - Expose -feliminate_zeros for quadrature optimisations to give user more control - Remove return of form in compile_form - Remove object_names argument to compile_element - Rename ElementUnion -> EnrichedElement - Add support for tan() and inverse trigonometric functions - Added support for ElementUnion (i.e. span of combinations of elements) - Added support for Bubble elements - Added support for UFL.SpatialCoordinate. 0.9.2 (2010-02-17) ------------------ - Bug fix in removal of unused variables in Piola-mapped terms for tensor representation 0.9.1 (2010-02-15) ------------------ - Add back support for FErari optimizations - Bug fixes in JIT compiler 0.9.0 (2010-02-02) ------------------ - Updates for FIAT 0.9.0 - Updates for UFC 1.4.0 (now supporting the full interface) - Automatic selection of representation - Change quadrature_order --> quadrature_degree - Split compile() --> compile_form(), compile_element() - Major cleanup and reorganization of code (flatter directories) - Updates for changes in UFL: Argument, Coefficient, FormData 0.7.1 ----- - Handle setting quadrature degree when it is set to None in UFL form - Added demo: HyperElasticity.ufl 0.7.0 ----- - Move contents of TODO to: https://blueprints.launchpad.net/ffc - Support for restriction of finite elements to only consider facet dofs - Use quadrature_order from metadata when integrating terms using tensor representation - Use loop to reset the entries of the local element tensor - Added new symbolic classes for quadrature optimisation (speed up compilation) - Added demos: Biharmonic.ufl, div(grad(v)) term; ReactionDiffusion.ufl, tuple notation; MetaData.ufl, how to attach metadata to the measure; ElementRestriction.ufl, restriction of elements to facets - Tabulate the coordinates of the integration points in the tabulate_tensor() function - Change command line option '-f split_implementation' -> '-f split' - Renaming of files and restructuring of the compiler directory - Added option -q rule (--quadrature-rule rule) to specify which rule to use for integration of a given integral. (Can also be set through the metadata entry "quadrature_rule"). No rules have yet been implemented, so the default is the FIAT rule.
- Remove support for old style .form files/format 0.6.2 (2009-04-07) ------------------ - Experimental support for UFL, supporting both .form and .ufl - Moved configuration and construction of python extension module to ufc_module 0.6.1 (2009-02-18) ------------------ - Initial work on UFL transition - Minor bug fixes - The version of ufc and swig is included in the form signature - Better system configuration for JIT compiled forms - The JIT compiled python extension module uses shared_ptr for all classes 0.6.0 (2009-01-05) ------------------ - Update DOLFIN output format (-l dolfin) for DOLFIN 0.9.0 - Cross-platform fixes for test scripts - Minor bug fix for quadrature code generation (forms affected by this bug would not be able to compile) - Fix bug with output of ``*.py``. - Permit dot product between rectangular matrices (Frobenius norm) 0.5.1 (2008-10-20) ------------------ - New operator skew() - Allow JIT compilation of elements and dof maps - Rewrite JIT compiler to rely on Instant for caching - Display flop count for evaluating the element tensor during compilation - Add arguments language and representation to options dictionary - Fix installation on Windows - Add option -f split_implementation for separate .h and .cpp files 0.5.0 (2008-06-23) ------------------ - Remove default restriction +/- for Constant - Make JIT optimization (-O0 / -O2) optional - Add in-memory cache to speed up JIT compiler for repeated assembly - Allow subdomain integrals without needing full range of integrals - Allow simple subdomain integral specification dx(0), dx(1), ds(0) etc 0.4.5 (2008-04-30) ------------------ - Optimizations in generated quadrature code - Change formatting of floats from %g to %e, fixes problem with too long integers - Bug fix for order of values in interpolate_vertex_values, now according to UFC - Speed up JIT compiler - Add index ranges to form printing - Throw runtime error in functions not generated - Update DOLFIN format for new location of include files 0.4.4 (2008-02-18) ------------------ - RT, BDM, BDFM and Nedelec now working in 2D and 3D - New element type QuadratureElement - Add support for 1D elements - Add experimental support for new Darcy-Stokes element - Use FIAT transformed spaces instead of mapping in FFC - Updates for UFC 1.1 - Implement caching of forms/modules in ~/.ffc/cache for JIT compiler - Add script ffc-clean - New operators lhs() and rhs() - Bug fixes in simplify - Bug fixes for Nedelec and BDFM - Fix bug in mult() - Fix bug with restrictions on exterior facet integrals - Fix bug in grad() for vectors - Add divergence operator for matrices 0.4.3 (2007-10-23) ------------------ - Require FIAT to use UFC reference cells - Fix bug in form simplification - Rename abs --> modulus to avoid conflict with builtin abs - Fix bug in operators invert, abs, sqrt - Fix bug in integral tabulation - Add BDFM and Nedelec elements (nonworking) - Fix bug in JIT compiler 0.4.2 (2007-08-31) ------------------ - Change license from GPL v2 to GPL v3 or later - Add JIT (just-in-time) compiler - Fix bug for constants on interior facets 0.4.1 (2007-06-22) ------------------ - Fix bug in simplification of forms - Optimize removal of unused terms in code formatting 0.4.0 (2007-06-20) ------------------ - Move to UFC interface for code generation - Major rewrite, restructure, cleanup - Add support for Brezzi-Douglas-Marini (BDM) elements - Add support for Raviart-Thomas (RT) elements - Add support for Discontinuous Galerkin (DG) methods - Operators jump() and avg() - Add
quadrature compilation mode (experimental) - Simplification of forms - Operators sqrt(), abs() and inverse - Improved Python interface - Add flag -f precision=n - Generate code for basis functions and derivatives - Use Set from set module for Python2.3 compatibility 0.3.5 (2006-12-01) ------------------ - Bug fixes - Move from Numeric to numpy 0.3.4 (2006-10-27) ------------------ - Updates for new DOLFIN mesh library - Add support for evaluation of functionals - Add operator outer() for outer product of vector-valued functions - Enable optimization of linear forms (in addition to bilinear forms) - Remove DOLFIN SWIG format - Fix bug in ffc -v/--version (thanks to Ola Skavhaug) - Consolidate DOLFIN and DOLFIN SWIG formats (patch from Johan Jansson) - Fix bug in optimized compilation (-O) for some forms ("too many values to unpack") 0.3.3 (2006-09-05) ------------------ - Fix bug in operator div() - Add operation count (number of multiplications) with -d0 - Add hint for printing more informative error messages (flag -d1) - Modify implementation of vertexeval() - Add support for boundary integrals (Garth N. Wells) 0.3.2 (2006-04-01) ------------------ - Add support for FErari optimizations, new flag -O 0.3.1 (2006-03-28) ------------------ - Remove verbose output: silence means success - Generate empty boundary integral eval() to please Intel C++ compiler - New classes TestFunction and TrialFunction 0.3.0 (2006-03-01) ------------------ - Work on manual, document command-line and user-interfaces - Name change: u --> U - Add compilation of elements without form - Add generation of FiniteElementSpec in DOLFIN formats - Fix bugs in raw and XML formats - Fix bug in LaTeX format - Fix path and predefine tokens to enable import in .form file - Report number of entries in reference tensor during compilation 0.2.5 (2005-12-28) ------------------ - Add demo Stabilization.form - Further speedup computation of reference tensor (use ufunc Numeric.add) 0.2.4 (2005-12-05) ------------------ - Report time taken to compute reference tensor - Restructure computation of reference tensor to use less memory. As a side effect, the speed has also been improved. 
- Update for DOLFIN name change node --> vertex - Update finite element interface for DOLFIN - Check for FIAT bug in discontinuous vector Lagrange elements - Fix signatures for vector-valued elements 0.2.3 (2005-11-28) ------------------ - New fast Numeric/BLAS based algorithm for computing reference tensor - Bug fix: reassign indices for complete subexpressions - Bug fix: operator Function * Integral - Check tensor notation for completeness - Bug fix: mixed elements with more than two function spaces - Don't declare unused coefficients (or gcc will complain) 0.2.2 (2005-11-14) ------------------ - Add command-line argument -v / --version - Add new operator mean() for projection onto piecewise constants - Add support for projections - Bug fix for higher order mixed elements: declaration of edge/face_ordering - Generate code for sub elements of mixed elements - Add new test form: TensorWeightedLaplacian - Add new test form: EnergyNorm - Fix bugs in mult() and vec() (skavhaug) - Reset correct entries of G for interior in BLAS mode - Only assign to entries of G that meet nonzero entries of A in BLAS mode 0.2.1 (2005-10-11) ------------------ - Only generate declarations that are needed according to format - Check for missing options and add missing default options - Simplify usage of FFC as Python module: from ffc import * - Fix bug in division with constants - Generate output for BLAS (with option -f blas) - Add new XML output format - Remove command-line option --license (collect in compiler options -f) - Modify demo Mass.form to use 3rd order Lagrange on tets - Fix bug in dofmap() for equal order mixed elements - Add compiler option -d debuglevel - Fix Python Numeric bug: vdot --> dot 0.2.0 (2005-09-23) ------------------ - Generate function vertexeval() for evaluation at vertices - Add support for arbitrary mixed elements - Add man page - Work on manual, chapters on form language, quickstart and installation - Handle exceptions gracefully in command-line interface - Use new template fenicsmanual.cls for manual - Add new operators grad, div, rot (curl), D, rank, trace, dot, cross - Factorize common reference tensors from terms with equal signatures - Collect small building blocks for form algebra in common module tokens.py 0.1.9 (2005-07-05) ------------------ - Complete support for general order Lagrange elements on triangles and tetrahedra - Compute reordering of dofs on tets correctly - Update manual with ordering of dofs - Break compilation into two phases: build() and write() - Add new output format ASE (Matt Knepley) - Improve python interface to FFC - Remove excessive logging at compilation - Fix bug in raw output format 0.1.8 (2005-05-17) ------------------ - Access data through map in DOLFIN format - Experimental support for computation of coordinate maps - Add first draft of manual - Experimental support for computation of dof maps - Allow specification of the number of components for vector Lagrange - Count the number of zeros dropped - Fix bug in handling command-line arguments - Use module sets instead of built-in set (fix for Python 2.3) - Handle constant indices correctly (bug reported by Garth N.
Wells) 0.1.7 (2005-05-02) ------------------ - Write version number to output - Add command-line option for choosing license - Display usage if no input is given - Bug fix for finding correct prefix of file name - Automatically choose name of output file (if not supplied) - Use FIAT tabulation mode for vector-valued elements (speedup by a factor of 5) - Use FIAT tabulation mode for scalar elements (speedup by a factor of 1000) - Fix bug in demo elasticity.form (change order of u and v) - Make references to constants const in DOLFIN format - Don't generate code for unused entries of geometry tensor - Update formats to write numeric constants with full precision 0.1.6 (2005-03-17) ------------------ - Add support for mixing multiple different finite elements - Add support for division with constants - Fix index bug (reverse order of multi-indices) 0.1.5 (2005-03-14) ------------------ - Automatically choose the correct quadrature rule for precomputation - Add test program for verification of FIAT quadrature rules - Fix bug for derivative of sum - Improve common interface for debugging: add indentation - Add support for constants - Fix bug for sums of more than one term (make copies of references in lists) - Add '_' in naming of geometry tensor (needed for large dimensions) - Add example elasticity.form - Cleanup build_indices() 0.1.4-1 (2005-02-07) -------------------- - Fix version number and remove build directory from tarball 0.1.4 (2005-02-04) ------------------ - Fix bug for systems, seems to work now - Add common interface for debugging - Modify DOLFIN output to initialize functions - Create unique numbers for each function - Use namespaces for DOLFIN output instead of class names - Temporary implementation of dof mapping for vector-valued elements - Make DOLFIN output format put entries into PETSc block - Change name of coefficient data: c%d[%d] -> c[%d][%d] - Change ordering of basis functions (one component at a time) - Add example poissonsystem.form - Modifications for new version of FIAT (FIAT-L) FIAT version 0.1 a factor 5 slower (no memoization) FIAT version 0.1.1 a little faster, only a factor 2 slower - Add setup.py script 0.1.3 (2004-12-06) ------------------ - Fix bug in DOLFIN format (missing value when zero) - Add output of reference tensor to LaTeX format - Make raw output format print data with full precision - Add component diagram - Change order of declaration of basis functions - Add new output format raw 0.1.2 (2004-11-17) ------------------ - Add command-line interface ffc - Add support for functions (coefficients) - Add support for constants - Allow multiple forms (left- and right-hand side) in same file - Add test examples: poisson.form, mass.form, navierstokes.form - Wrap FIAT to create vector-valued finite element spaces - Check ranks of operands - Clean up algebra, add base class Element - Add some documentation (class diagram) - Add support for LaTeX output 0.1.1-1 (2004-11-10) -------------------- - Add missing file declaration.py 0.1.1 (2004-11-10) ------------------ - Make output variable names configurable - Clean up DOLFIN code generation - Post-process form to create reference, geometry, and element tensors - Experimental support for general tensor-valued elements - Clean up and improve index reassignment - Use string formatting for generation of output - Change index ordering to access row-wise 0.1.0 (2004-10-22) ------------------ - First iteration of the FEniCS Form Compiler - Change boost::shared_ptr --> std::shared_ptr ChangeLog for UFC ================= UFC
was merged into FFC on 2014-02-18. Below is the ChangeLog for UFC at the time of the merge. From this point onward, UFC version numbering restarts at the same version number as FFC and the rest of FEniCS. 2.3.0 (2014-01-07) ------------------ - Use std::vector<std::vector<std::size_t> > for topology data - Remove vertex coordinates from ufc::cell - Improve detection of compatible Python libraries - Add current swig version to the JIT compiled extension module - Remove dofmap::max_local_dimension() - Remove cell argument from dofmap::local_dimension() 2.2.0 (2013-03-24) ------------------ - Add new class ufc::point_integral - Use CMake to configure JIT compilation of forms - Generate UseUFC.cmake during configuration - Remove init_mesh(), init_cell(), init_mesh_finalize() - Remove ufc::mesh and add a vector of num_mesh_entities to global_dimension() and tabulate_dofs(). 2.1.0 (2013-01-07) ------------------ - Fix bug introduced by SWIG 2.0.5, which treated uint as Python long - Add optimization SWIG flags, fixing bug lp:987657 2.0.5 (2011-12-07) ------------------ - Improve configuration of libboost-math 2.0.4 (2011-11-28) ------------------ - Add boost_math_tr1 to library flags when JIT compiling an extension module 2.0.3 (2011-10-26) ------------------ - CMake config improvements 2.0.2 (2011-08-11) ------------------ - Some tweaks of installation 2.0.1 (2011-05-16) ------------------ - Make SWIG version >= 2.0 a requirement - Add possibility to set swig binary and swig path - Add missing const for map_{from,to}_reference_cell 2.0.0 (2011-02-23) ------------------ - Add quadrature version of tabulate_tensor - Add finite_element::map_{from,to}_reference_cell - Add finite_element::{topological,geometric}_dimension - Add dofmap::topological_dimension - Rename num_foo_integrals --> num_foo_domains - Rename dof_map --> dofmap - Add finite_element::create - Add dofmap::create 1.4.2 (2010-09-01) ------------------ - Move to CMake build system 1.4.1 (2010-07-01) ------------------ - Make functions introduced in UFC 1.1 mandatory (now pure virtual) - Update templates to allow constructor arguments in form classes 1.4.0 (2010-02-01) ------------------ - Changed behavior of create_foo_integral (returning 0 when integral is 0) - Bug fixes in installation 1.2.0 (2009-09-23) ------------------ - Add new function ufc::dof_map::max_local_dimension() - Change ufc::dof_map::local_dimension() to ufc::dof_map::local_dimension(const ufc::cell c) 1.1.2 (2009-04-07) ------------------ - Added configuration and building of python extension module to ufc_utils.build_ufc_module 1.1.1 (2009-02-20) ------------------ - The extension module is now not built if the conditions for shared_ptr are not met - Added SCons build system - The swig generated extension module will be compiled with shared_ptr support if boost is found on system and swig is of version 1.3.35 or higher - The swig generated extension module is named ufc.py and exposes all ufc base classes to python - Added a swig generated extension module to ufc.
UFC now depends on swig - Changed name of the python utility module from "ufc" to "ufc_utils" 1.1.0 (2008-02-18) ------------------ - Add new function ufc::finite_element::evaluate_dofs - Add new function ufc::finite_element::evaluate_basis_all - Add new function ufc::finite_element::evaluate_basis_derivatives_all - Add new function ufc::dof_map::geometric_dimension - Add new function ufc::dof_map::num_entity_dofs - Add new function ufc::dof_map::tabulate_entity_dofs 1.0.0 (2007-06-17) ------------------ - Release of UFC 1.0 ffcx-0.7.0/INSTALL000066400000000000000000000013111450721277100134990ustar00rootroot00000000000000To install FFCx, type pip install --prefix=/path/to/install/ . This will install FFCx under the given prefix, in a path like /path/to/install/lib/python3.6/site-packages/. To specify the C++ compiler and/or compiler flags used for compiling UFC and JITing, set the environment variables CXX and CXXFLAGS, respectively, before invoking setup.py. The installation script requires the Python module distutils, which for Debian users is available with the python-dev package. Other dependencies are listed in the file README. For detailed installation instructions, see the FFCx user manual which is available on http://fenicsproject.org/ and also in the subdirectory doc/manual/ of this source tree. ffcx-0.7.0/LICENSE000066400000000000000000000004751450721277100134650ustar00rootroot00000000000000The header file ufcx.h is released into the public domain. ------------------------------------------------------------------------------ Other files, unless stated otherwise in their head, are licensed under the GNU Lesser General Public License, version 3, or later. See COPYING and COPYING.LESSER for the license text. ffcx-0.7.0/MANIFEST.in000066400000000000000000000005121450721277100142060ustar00rootroot00000000000000include AUTHORS include COPYING include COPYING.LESSER include ChangeLog.rst include INSTALL include LICENSE include ffcx/codegeneration/ufcx.h recursive-include cmake * recursive-include demo * recursive-include doc * recursive-include ffcx *.in recursive-include libs * recursive-include test * global-exclude __pycache__ *.pyc ffcx-0.7.0/README.md000066400000000000000000000043761450721277100137420ustar00rootroot00000000000000# FFCx: The FEniCSx Form Compiler [![FFCx CI](https://github.com/FEniCS/ffcx/actions/workflows/pythonapp.yml/badge.svg)](https://github.com/FEniCS/ffcx/actions/workflows/pythonapp.yml) [![Coverage Status](https://coveralls.io/repos/github/FEniCS/ffcx/badge.svg?branch=main)](https://coveralls.io/github/FEniCS/ffcx?branch=main) FFCx is a new version of the FEniCS Form Compiler. It is being actively developed and is compatible with DOLFINx. FFCx is a compiler for finite element variational forms. From a high-level description of the form in the Unified Form Language (UFL), it generates efficient low-level C code that can be used to assemble the corresponding discrete operator (tensor). In particular, a bilinear form may be assembled into a matrix and a linear form may be assembled into a vector. FFCx may be used either from the command line (by invoking the `ffcx` command) or as a Python module (`import ffcx`). FFCx is part of the FEniCS Project. For more information, visit https://www.fenicsproject.org ## Installation To install FFCx from PyPI: ``` $ pip install fenics-ffcx ``` To install FFCx from the source directory: ``` $ pip install .
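# A hedged example (compiler name and flags are only illustrations): the
# INSTALL file notes that the compiler used for compiling UFC code and for
# JITing can be chosen via the CXX and CXXFLAGS environment variables set
# before the install step, e.g.:
$ CXX=g++-10 CXXFLAGS="-O2" pip install .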
``` ## Documentation Documentation can be viewed at https://docs.fenicsproject.org/ffcx/main ## Interface file installation only FFCx provides the `ufcx.h` interface header for finite element kernels, used by DOLFINx. `ufcx.h` is installed by FFCx within the Python site packages, but it is sometimes helpful to install only the header file. This can be done using `cmake`: ``` $ cmake -B build-dir -S cmake/ $ cmake --build build-dir $ cmake --install build-dir ``` ## License This program is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this program. If not, see <https://www.gnu.org/licenses/>. ffcx-0.7.0/_clang-format000066400000000000000000000053651450721277100151160ustar00rootroot00000000000000--- Language: Cpp # BasedOnStyle: LLVM AccessModifierOffset: -2 AlignAfterOpenBracket: Align AlignConsecutiveAssignments: false AlignConsecutiveDeclarations: false AlignEscapedNewlinesLeft: false AlignOperands: true AlignTrailingComments: true AllowAllParametersOfDeclarationOnNextLine: true AllowShortBlocksOnASingleLine: false AllowShortCaseLabelsOnASingleLine: false AllowShortFunctionsOnASingleLine: All AllowShortIfStatementsOnASingleLine: false AllowShortLoopsOnASingleLine: false AlwaysBreakAfterDefinitionReturnType: None AlwaysBreakAfterReturnType: None AlwaysBreakBeforeMultilineStrings: false AlwaysBreakTemplateDeclarations: true BinPackArguments: true BinPackParameters: true BraceWrapping: AfterClass: false AfterControlStatement: false AfterEnum: false AfterFunction: false AfterNamespace: false AfterObjCDeclaration: false AfterStruct: false AfterUnion: false BeforeCatch: false BeforeElse: false IndentBraces: false BreakBeforeBinaryOperators: All BreakBeforeBraces: Allman BreakBeforeTernaryOperators: true BreakConstructorInitializersBeforeComma: false BreakAfterJavaFieldAnnotations: false BreakStringLiterals: true ColumnLimit: 80 CommentPragmas: '^ IWYU pragma:' ConstructorInitializerAllOnOneLineOrOnePerLine: false ConstructorInitializerIndentWidth: 4 ContinuationIndentWidth: 4 Cpp11BracedListStyle: true DerivePointerAlignment: false DisableFormat: false ExperimentalAutoDetectBinPacking: false ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ] IncludeCategories: - Regex: '^"(llvm|llvm-c|clang|clang-c)/' Priority: 2 - Regex: '^(<|"(gtest|isl|json)/)' Priority: 3 - Regex: '.*' Priority: 1 IncludeIsMainRegex: '$' IndentCaseLabels: false IndentWidth: 2 IndentWrappedFunctionNames: false JavaScriptQuotes: Leave JavaScriptWrapImports: true KeepEmptyLinesAtTheStartOfBlocks: true MacroBlockBegin: '' MacroBlockEnd: '' MaxEmptyLinesToKeep: 1 NamespaceIndentation: None ObjCBlockIndentWidth: 2 ObjCSpaceAfterProperty: false ObjCSpaceBeforeProtocolList: true PenaltyBreakBeforeFirstCallParameter: 19 PenaltyBreakComment: 300 PenaltyBreakFirstLessLess: 120 PenaltyBreakString: 1000 PenaltyExcessCharacter: 1000000 PenaltyReturnTypeOnItsOwnLine: 60 PointerAlignment: Left ReflowComments: true SortIncludes: true SpaceAfterCStyleCast: false SpaceAfterTemplateKeyword: true SpaceBeforeAssignmentOperators: true SpaceBeforeParens: ControlStatements
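# Usage note (a hedged sketch; the file path is only an illustration):
# clang-format discovers this style file automatically in parent directories
# of the input, so a source file in the tree can be reformatted in place with:
#   clang-format -i ffcx/codegeneration/ufcx.h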
SpaceInEmptyParentheses: false SpacesBeforeTrailingComments: 1 SpacesInAngles: false SpacesInContainerLiterals: true SpacesInCStyleCastParentheses: false SpacesInParentheses: false SpacesInSquareBrackets: false Standard: Cpp11 TabWidth: 8 UseTab: Never ... ffcx-0.7.0/cmake/000077500000000000000000000000001450721277100135325ustar00rootroot00000000000000ffcx-0.7.0/cmake/CMakeLists.txt000066400000000000000000000036321450721277100162760ustar00rootroot00000000000000cmake_minimum_required(VERSION 3.19) project(ufcx VERSION 0.7.0 DESCRIPTION "UFCx interface header for finite element kernels" LANGUAGES C HOMEPAGE_URL https://github.com/fenics/ffcx) include(GNUInstallDirs) file(SHA1 ${PROJECT_SOURCE_DIR}/../ffcx/codegeneration/ufcx.h UFCX_HASH) message("Test hash: ${UFCX_HASH}") add_library(${PROJECT_NAME} INTERFACE) add_library(${PROJECT_NAME}::${PROJECT_NAME} ALIAS ${PROJECT_NAME}) target_include_directories(${PROJECT_NAME} INTERFACE $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/../ffcx/codegeneration> $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>) # Prepare and install CMake target/config files install(TARGETS ${PROJECT_NAME} EXPORT ${PROJECT_NAME}_Targets ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) include(CMakePackageConfigHelpers) write_basic_package_version_file("${PROJECT_NAME}ConfigVersion.cmake" VERSION ${PROJECT_VERSION} COMPATIBILITY AnyNewerVersion) configure_package_config_file("${PROJECT_NAME}Config.cmake.in" "${PROJECT_NAME}Config.cmake" INSTALL_DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/${PROJECT_NAME}/cmake) install(EXPORT ${PROJECT_NAME}_Targets FILE ${PROJECT_NAME}Targets.cmake NAMESPACE ${PROJECT_NAME}:: DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/${PROJECT_NAME}/cmake) install(FILES "${PROJECT_BINARY_DIR}/${PROJECT_NAME}Config.cmake" "${PROJECT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake" DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/${PROJECT_NAME}/cmake) # Install header file install(FILES ${PROJECT_SOURCE_DIR}/../ffcx/codegeneration/ufcx.h TYPE INCLUDE) # Configure and install pkgconfig file configure_file(ufcx.pc.in ufcx.pc @ONLY) install(FILES ${PROJECT_BINARY_DIR}/ufcx.pc DESTINATION ${CMAKE_INSTALL_DATADIR}/pkgconfig) ffcx-0.7.0/cmake/ufcx.pc.in000066400000000000000000000004211450721277100154250ustar00rootroot00000000000000prefix="@CMAKE_INSTALL_PREFIX@" exec_prefix="${prefix}" includedir=${prefix}/@CMAKE_INSTALL_INCLUDEDIR@ definitions=@ Name: @PROJECT_NAME@ Description: @CMAKE_PROJECT_DESCRIPTION@ URL: @CMAKE_PROJECT_HOMEPAGE_URL@ Version: @PROJECT_VERSION@ Cflags: -I"${includedir}" Libs:ffcx-0.7.0/cmake/ufcxConfig.cmake.in000066400000000000000000000002341450721277100172330ustar00rootroot00000000000000@PACKAGE_INIT@ set(UFCX_SIGNATURE @UFCX_HASH@) include("${CMAKE_CURRENT_LIST_DIR}/@PROJECT_NAME@Targets.cmake") check_required_components("@PROJECT_NAME@")ffcx-0.7.0/demo/000077500000000000000000000000001450721277100133765ustar00rootroot00000000000000ffcx-0.7.0/demo/BiharmonicHHJ.py000066400000000000000000000015141450721277100163560ustar00rootroot00000000000000# Copyright (C) 2016 Lizao Li # # The bilinear form a(u, v) and linear form L(v) for # Biharmonic equation in Hellan-Herrmann-Johnson (HHJ) # formulation.
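# Usage sketch (not part of the original demo): per the README, a form file
# like this one is compiled from the command line by invoking the `ffcx`
# command on it, e.g.
#
#     $ ffcx BiharmonicHHJ.py
#
# which generates low-level C code (by default, BiharmonicHHJ.h and
# BiharmonicHHJ.c) implementing the assembly kernels for the forms below.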
import basix.ufl from ufl import (Coefficient, FacetNormal, TestFunctions, TrialFunctions, dot, dS, ds, dx, grad, inner, jump, triangle) HHJ = basix.ufl.element('HHJ', "triangle", 2) P = basix.ufl.element('P', "triangle", 3) mixed_element = basix.ufl.mixed_element([HHJ, P]) (sigma, u) = TrialFunctions(mixed_element) (tau, v) = TestFunctions(mixed_element) f = Coefficient(P) def b(sigma, v): n = FacetNormal(triangle) return inner(sigma, grad(grad(v))) * dx \ - dot(dot(sigma('+'), n('+')), n('+')) * jump(grad(v), n) * dS \ - dot(dot(sigma, n), n) * dot(grad(v), n) * ds a = inner(sigma, tau) * dx - b(tau, u) + b(sigma, v) L = f * v * dx ffcx-0.7.0/demo/BiharmonicRegge.py000066400000000000000000000016331450721277100170000ustar00rootroot00000000000000# Copyright (C) 2016 Lizao Li # # The bilinear form a(u, v) and linear form L(v) for # Biharmonic equation in Regge formulation. import basix.ufl from ufl import (Coefficient, FacetNormal, Identity, TestFunctions, TrialFunctions, dot, dS, ds, dx, grad, inner, jump, tetrahedron, tr) REG = basix.ufl.element("Regge", "tetrahedron", 1) P = basix.ufl.element("Lagrange", "tetrahedron", 2) mixed_element = basix.ufl.mixed_element([REG, P]) (sigma, u) = TrialFunctions(mixed_element) (tau, v) = TestFunctions(mixed_element) f = Coefficient(P) def S(mu): return mu - Identity(3) * tr(mu) def b(mu, v): n = FacetNormal(tetrahedron) return inner(S(mu), grad(grad(v))) * dx \ - dot(dot(S(mu('+')), n('+')), n('+')) * jump(grad(v), n) * dS \ - dot(dot(S(mu), n), n) * dot(grad(v), n) * ds a = inner(S(sigma), S(tau)) * dx - b(tau, u) + b(sigma, v) L = f * v * dx ffcx-0.7.0/demo/CellGeometry.py000066400000000000000000000015071450721277100163460ustar00rootroot00000000000000# Copyright (C) 2013 Martin S. Alnaes # # A functional M involving a bunch of cell geometry quantities. import basix.ufl from ufl import (CellVolume, Circumradius, Coefficient, FacetArea, FacetNormal, SpatialCoordinate, ds, dx, tetrahedron, TrialFunction) from ufl.geometry import FacetEdgeVectors cell = tetrahedron V = basix.ufl.element("P", cell.cellname(), 1) u = Coefficient(V) # TODO: Add all geometry for all cell types to this and other demo files, needed for regression tests. x = SpatialCoordinate(cell) n = FacetNormal(cell) vol = CellVolume(cell) rad = Circumradius(cell) area = FacetArea(cell) M = u * (x[0] * vol * rad) * dx + u * (x[0] * vol * rad * area) * ds # + u*area*avg(n[0]*x[0]*vol*rad)*dS # Test some obscure functionality fev = FacetEdgeVectors(cell) v = TrialFunction(V) L = fev[0, 0] * v * ds ffcx-0.7.0/demo/ComplexPoisson.py000066400000000000000000000025051450721277100167340ustar00rootroot00000000000000# Copyright (C) 2023 Chris Richardson # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see <https://www.gnu.org/licenses/>. # # The bilinear form a(u, v) and linear form L(v) for # Poisson's equation using bilinear elements on bilinear mesh geometry.
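# Usage sketch (the entry point name is an assumption, not part of the
# original demo): the README notes FFCx can also be driven as a Python
# module, along the lines of
#
#     import ffcx.main
#     ffcx.main.main(["ComplexPoisson.py"])
#
# Because the form below contains a complex literal k, the generated kernels
# use complex scalar arithmetic.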
import basix.ufl from ufl import (Coefficient, FunctionSpace, Mesh, TestFunction, TrialFunction, dx, grad, inner) coords = basix.ufl.element("P", "triangle", 2, shape=(2, )) mesh = Mesh(coords) dx = dx(mesh) element = basix.ufl.element("P", mesh.ufl_cell().cellname(), 2) space = FunctionSpace(mesh, element) u = TrialFunction(space) v = TestFunction(space) f = Coefficient(space) # Test literal complex number in form k = 3.213 + 1.023j a = k * inner(grad(u), grad(v)) * dx L = inner(k * f, v) * dx ffcx-0.7.0/demo/Components.py000066400000000000000000000020641450721277100160770ustar00rootroot00000000000000# Copyright (C) 2011 Garth N. Wells # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see <https://www.gnu.org/licenses/>. # # This example demonstrates how to create vectors component-wise import basix.ufl from ufl import Coefficient, TestFunction, as_vector, dot, dx element = basix.ufl.element("Lagrange", "tetrahedron", 1, shape=(3, )) v = TestFunction(element) f = Coefficient(element) # Create vector v0 = as_vector([v[0], v[1], 0.0]) # Use created vector in linear form L = dot(f, v0) * dx ffcx-0.7.0/demo/Conditional.py000066400000000000000000000026631450721277100162200ustar00rootroot00000000000000# Copyright (C) 2010-2011 Kristian B. Oelgaard # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see <https://www.gnu.org/licenses/>. # # Illustration of how to use Conditional to define a source term import basix.ufl from ufl import (And, Constant, Not, Or, SpatialCoordinate, TestFunction, conditional, dx, ge, gt, le, lt, triangle) element = basix.ufl.element("Lagrange", "triangle", 2) v = TestFunction(element) g = Constant(triangle) x = SpatialCoordinate(triangle) c0 = conditional(le((x[0] - 0.33)**2 + (x[1] - 0.67)**2, 0.015), -1.0, 5.0) c = conditional(le((x[0] - 0.33)**2 + (x[1] - 0.67)**2, 0.025), c0, 0.0) t0 = And(ge(x[0], 0.55), le(x[0], 0.95)) t1 = Or(lt(x[1], 0.05), gt(x[1], 0.45)) t2 = And(t0, Not(t1)) t = conditional(And(ge(x[1] - x[0] - 0.05 + 0.55, 0.0), t2), -1.0, 0.0) k = conditional(gt(1, 0), g, g + 1) f = c + t + k L = v * f * dx ffcx-0.7.0/demo/ExpressionInterpolation.py000066400000000000000000000043111450721277100206560ustar00rootroot00000000000000# Copyright (C) 2022 Jørgen S. Dokken # # This file is part of FFCx.
# # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see <https://www.gnu.org/licenses/>. # # Defines an Expression which evaluates several different functions at # a set of interpolation points import basix import basix.ufl from ffcx.element_interface import QuadratureElement from ufl import Coefficient, FunctionSpace, Mesh, grad # Define mesh cell = "triangle" v_el = basix.ufl.element("Lagrange", cell, 1, shape=(2, )) mesh = Mesh(v_el) # Define mixed function space el = basix.ufl.element("P", cell, 2) el_int = basix.ufl.element("Discontinuous Lagrange", cell, 1, shape=(2, )) me = basix.ufl.mixed_element([el, el_int]) V = FunctionSpace(mesh, me) u = Coefficient(V) # Define expressions on each sub-space du0 = grad(u[0]) du1 = grad(u[1]) # Define an expression using quadrature elements q_rule = "gauss_jacobi" q_degree = 3 q_el = QuadratureElement(cell, (), q_rule, q_degree) Q = FunctionSpace(mesh, q_el) q = Coefficient(Q) powq = 3 * q**2 # Extract basix cell type b_cell = basix.cell.string_to_type(cell) # Find quadrature points for quadrature element b_rule = basix.quadrature.string_to_type(q_rule) quadrature_points, _ = basix.quadrature.make_quadrature(b_cell, q_degree, rule=b_rule) # Get interpolation points for output space family = basix.finite_element.string_to_family("Lagrange", cell) b_element = basix.create_element(family, b_cell, 4, basix.LagrangeVariant.gll_warped, discontinuous=True) interpolation_points = b_element.points # Create expressions that can be used for interpolation expressions = [(du0, interpolation_points), (du1, interpolation_points), (powq, quadrature_points)] ffcx-0.7.0/demo/FacetIntegrals.py000066400000000000000000000023411450721277100166430ustar00rootroot00000000000000# Copyright (C) 2009-2010 Anders Logg # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see <https://www.gnu.org/licenses/>. # # First added: 2009-03-20 # Last changed: 2011-03-08 # # Simple example of a form defined over exterior and interior facets.
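# Background for the form below (standard UFL identities, added as a hedged
# note): on an interior facet, u('+') and u('-') are the restrictions of u to
# the two adjacent cells, and
#
#     avg(u) == (u('+') + u('-')) / 2
#     jump(u, n) == u('+') * n('+') + u('-') * n('-')
#
# so the dS terms below combine explicit restrictions with jump/avg.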
import basix.ufl from ufl import (FacetNormal, TestFunction, TrialFunction, avg, ds, dS, grad, inner, jump, triangle) element = basix.ufl.element("Discontinuous Lagrange", "triangle", 1) u = TrialFunction(element) v = TestFunction(element) n = FacetNormal(triangle) a = u * v * ds \ + u('+') * v('-') * dS \ + inner(jump(u, n), avg(grad(v))) * dS \ + inner(avg(grad(u)), jump(v, n)) * dS ffcx-0.7.0/demo/FacetRestrictionAD.py000066400000000000000000000020701450721277100174240ustar00rootroot00000000000000# Copyright (C) 2010 Garth N. Wells # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see <https://www.gnu.org/licenses/>. import basix.ufl from ufl import (Coefficient, TestFunction, TrialFunction, avg, derivative, dot, dS, dx, grad, inner) element = basix.ufl.element("Discontinuous Lagrange", "triangle", 1) v = TestFunction(element) w = Coefficient(element) L = inner(grad(w), grad(v)) * dx - dot(avg(grad(w)), avg(grad(v))) * dS u = TrialFunction(element) a = derivative(L, w, u) ffcx-0.7.0/demo/HyperElasticity.py000066400000000000000000000043031450721277100170720ustar00rootroot00000000000000# # Author: Martin Sandve Alnes # Date: 2008-12-22 # import basix.ufl # Modified by Garth N. Wells, 2009 from ufl import (Coefficient, Constant, FacetNormal, Identity, SpatialCoordinate, TestFunction, TrialFunction, derivative, det, diff, dot, ds, dx, exp, grad, inner, inv, tetrahedron, tr, variable) # Cell and its properties cell = tetrahedron d = cell.geometric_dimension() N = FacetNormal(cell) x = SpatialCoordinate(cell) # Elements u_element = basix.ufl.element("P", cell.cellname(), 2, shape=(3, )) p_element = basix.ufl.element("P", cell.cellname(), 1) A_element = basix.ufl.element("P", cell.cellname(), 1, shape=(3, 3)) # Test and trial functions v = TestFunction(u_element) w = TrialFunction(u_element) # Displacement at current and two previous timesteps u = Coefficient(u_element) up = Coefficient(u_element) upp = Coefficient(u_element) # Time parameters dt = Constant(cell) # Fiber field A = Coefficient(A_element) # External forces T = Coefficient(u_element) p0 = Coefficient(p_element) # Material parameters FIXME rho = Constant(cell) K = Constant(cell) c00 = Constant(cell) c11 = Constant(cell) c22 = Constant(cell) # Deformation gradient I = Identity(d) F = I + grad(u) F = variable(F) Finv = inv(F) J = det(F) # Left Cauchy-Green deformation tensor B = F * F.T I1_B = tr(B) I2_B = (I1_B**2 - tr(B * B)) / 2 I3_B = J**2 # Right Cauchy-Green deformation tensor C = F.T * F I1_C = tr(C) I2_C = (I1_C**2 - tr(C * C)) / 2 I3_C = J**2 # Green strain tensor E = (C - I) / 2 # Mapping of strain in fiber directions Ef = A * E * A.T # Strain energy function W(Q(Ef)) Q = c00 * Ef[0, 0]**2 + c11 * Ef[1, 1]**2 + c22 * Ef[2, 2]**2 # FIXME: insert some simple law here W = (K / 2) * (exp(Q) - 1) # + p stuff # First Piola-Kirchhoff stress tensor P = diff(W, F) # Acceleration term discretized with finite differences k = dt / rho acc = (u - 2 * up + upp) # Residual equation # FIXME: Can contain
errors, not tested! a_F = inner(acc, v) * dx \ + k * inner(P, grad(v)) * dx \ - k * dot(J * Finv * T, v) * ds(0) \ - k * dot(J * Finv * p0 * N, v) * ds(1) # Jacobian matrix of residual equation a_J = derivative(a_F, u, w) # Export forms forms = [a_F, a_J] ffcx-0.7.0/demo/MassDG0.py000066400000000000000000000016521450721277100151520ustar00rootroot00000000000000# Copyright (C) 2021 Igor Baratta # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see <https://www.gnu.org/licenses/>. # # The bilinear form for a mass matrix. import basix.ufl from ufl import TestFunction, TrialFunction, dx, inner element = basix.ufl.element("DG", "tetrahedron", 0) v = TestFunction(element) u = TrialFunction(element) a = inner(u, v) * dx ffcx-0.7.0/demo/MassHcurl_2D_1.py000066400000000000000000000016061450721277100164210ustar00rootroot00000000000000# Copyright (C) 2004-2010 Anders Logg # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see <https://www.gnu.org/licenses/>. import basix.ufl from ufl import TestFunction, TrialFunction, dx, inner element = basix.ufl.element("N1curl", "triangle", 1) v = TestFunction(element) u = TrialFunction(element) a = inner(v, u) * dx ffcx-0.7.0/demo/MassHdiv_2D_1.py000066400000000000000000000015761450721277100162400ustar00rootroot00000000000000# Copyright (C) 2010 Anders Logg # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see <https://www.gnu.org/licenses/>. import basix.ufl from ufl import TestFunction, TrialFunction, dx, inner element = basix.ufl.element("BDM", "triangle", 1) v = TestFunction(element) u = TrialFunction(element) a = inner(v, u) * dx ffcx-0.7.0/demo/MathFunctions.py000066400000000000000000000032301450721277100165300ustar00rootroot00000000000000# Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFCx.
# # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see <https://www.gnu.org/licenses/>. # # Test all algebra operators on Coefficients. import basix.ufl from ufl import (Coefficient, acos, asin, atan, bessel_J, bessel_Y, cos, dx, erf, exp, ln, sin, sqrt, tan) element = basix.ufl.element("Lagrange", "triangle", 1) c0 = Coefficient(element) c1 = Coefficient(element) s0 = 3 * c0 - c1 p0 = c0 * c1 f0 = c0 / c1 integrand = sqrt(c0) + sqrt(s0) + sqrt(p0) + sqrt(f0)\ + exp(c0) + exp(s0) + exp(p0) + exp(f0)\ + ln(c0) + ln(s0) + ln(p0) + ln(f0)\ + cos(c0) + cos(s0) + cos(p0) + cos(f0)\ + sin(c0) + sin(s0) + sin(p0) + sin(f0)\ + tan(c0) + tan(s0) + tan(p0) + tan(f0)\ + acos(c0) + acos(s0) + acos(p0) + acos(f0)\ + asin(c0) + asin(s0) + asin(p0) + asin(f0)\ + atan(c0) + atan(s0) + atan(p0) + atan(f0)\ + erf(c0) + erf(s0) + erf(p0) + erf(f0)\ + bessel_J(1, c0) + bessel_J(1, s0) + bessel_J(0, p0) + bessel_J(0, f0)\ + bessel_Y(1, c0) + bessel_Y(1, s0) + bessel_Y(0, p0) + bessel_Y(0, f0) a = integrand * dx ffcx-0.7.0/demo/MetaData.py000066400000000000000000000025321450721277100154320ustar00rootroot00000000000000# Copyright (C) 2009 Kristian B. Oelgaard # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see <https://www.gnu.org/licenses/>. # # Test form for metadata. import basix.ufl from ufl import Coefficient, TestFunction, TrialFunction, dx, grad, inner element = basix.ufl.element("Lagrange", "triangle", 1) vector_element = basix.ufl.element("Lagrange", "triangle", 1, shape=(2, )) u = TrialFunction(element) v = TestFunction(element) c = Coefficient(vector_element) # Terms on the same subdomain using different quadrature degree a = inner(grad(u), grad(v)) * dx(0, degree=8)\ + inner(c, c) * inner(grad(u), grad(v)) * dx(1, degree=4)\ + inner(c, c) * inner(grad(u), grad(v)) * dx(1, degree=2)\ + inner(grad(u), grad(v)) * dx(1, degree=-1) L = v * dx(0, metadata={"precision": 1}) ffcx-0.7.0/demo/Mini.py000066400000000000000000000026231450721277100146470ustar00rootroot00000000000000# Copyright (C) 2010 Marie E. Rognes # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see <https://www.gnu.org/licenses/>. # # Illustration of vector sum of elements (EnrichedElement): The # bilinear form a(u, v) for the Stokes equations using a mixed # formulation involving the Mini element. The velocity element is # composed of a P1 element augmented by the cubic bubble function. import basix.ufl from ufl import TestFunctions, TrialFunctions, div, dx, grad, inner P1 = basix.ufl.element("Lagrange", "triangle", 1) B = basix.ufl.element("Bubble", "triangle", 3) V = basix.ufl.blocked_element(basix.ufl.enriched_element([P1, B]), shape=(2, )) Q = basix.ufl.element("P", "triangle", 1) Mini = basix.ufl.mixed_element([V, Q]) (u, p) = TrialFunctions(Mini) (v, q) = TestFunctions(Mini) a = (inner(grad(u), grad(v)) - div(v) * p + div(u) * q) * dx ffcx-0.7.0/demo/MixedCoefficient.py000066400000000000000000000020751450721277100171610ustar00rootroot00000000000000# -*- coding: utf-8 -*- # Copyright (C) 2016 Miklós Homolya # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see <https://www.gnu.org/licenses/>. # # Mixed coefficient. import basix.ufl from ufl import Coefficients, dot, dS, dx DG = basix.ufl.element("DG", "triangle", 0, shape=(2, )) CG = basix.ufl.element("Lagrange", "triangle", 2) RT = basix.ufl.element("RT", "triangle", 3) element = basix.ufl.mixed_element([DG, CG, RT]) f, g, h = Coefficients(element) forms = [dot(f('+'), h('-')) * dS + g * dx] ffcx-0.7.0/demo/MixedGradient.py000066400000000000000000000005161450721277100164760ustar00rootroot00000000000000import basix.ufl from ufl import TestFunctions, TrialFunctions, ds, grad, inner element1 = basix.ufl.element("DG", "triangle", 1) element2 = basix.ufl.element("DGT", "triangle", 1) element = basix.ufl.mixed_element([element1, element2]) u = TrialFunctions(element)[0] v = TestFunctions(element)[0] a = inner(grad(u), grad(v)) * ds ffcx-0.7.0/demo/MixedPoissonDual.py000066400000000000000000000025061450721277100172020ustar00rootroot00000000000000# Copyright (C) 2014 Jan Blechta # # This file is part of FFCx. # # DOLFINx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # DOLFINx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with DOLFINx. If not, see <https://www.gnu.org/licenses/>.
# # First added: 2014-01-29 # Last changed: 2014-01-29 # # The bilinear form a(u, v) and linear form L(v) for a two-field # (mixed) formulation of Poisson's equation import basix.ufl from ufl import Coefficient, TestFunctions, TrialFunctions, dot, ds, dx, grad DRT = basix.ufl.element("Discontinuous RT", "triangle", 2) P = basix.ufl.element("P", "triangle", 3) W = basix.ufl.mixed_element([DRT, P]) (sigma, u) = TrialFunctions(W) (tau, v) = TestFunctions(W) P1 = basix.ufl.element("P", "triangle", 1) f = Coefficient(P1) g = Coefficient(P1) a = (dot(sigma, tau) + dot(grad(u), tau) + dot(sigma, grad(v))) * dx L = - f * v * dx - g * v * ds ffcx-0.7.0/demo/Normals.py000066400000000000000000000021211450721277100153570ustar00rootroot00000000000000# Copyright (C) 2009 Peter Brune # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see <https://www.gnu.org/licenses/>. # # This example demonstrates how to use the facet normals # Merely project the normal onto a vector section. import basix.ufl from ufl import FacetNormal, TestFunction, TrialFunction, dot, ds, triangle cell = triangle element = basix.ufl.element("Lagrange", cell.cellname(), 1, shape=(2, )) n = FacetNormal(cell) v = TrialFunction(element) u = TestFunction(element) a = dot(v, u) * ds L = dot(n, u) * ds ffcx-0.7.0/demo/Poisson1D.py000066400000000000000000000020341450721277100155660ustar00rootroot00000000000000# Copyright (C) 2004-2007 Anders Logg # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see <https://www.gnu.org/licenses/>. # # The bilinear form a(u, v) and linear form L(v) for # Poisson's equation. import basix.ufl from ufl import Coefficient, TestFunction, TrialFunction, dx, grad, inner element = basix.ufl.element("Lagrange", "interval", 1) u = TrialFunction(element) v = TestFunction(element) f = Coefficient(element) a = inner(grad(u), grad(v)) * dx L = f * v * dx ffcx-0.7.0/demo/PoissonQuad.py000066400000000000000000000023701450721277100162170ustar00rootroot00000000000000# Copyright (C) 2016 Jan Blechta # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see . # # The bilinear form a(u, v) and linear form L(v) for # Poisson's equation using bilinear elements on bilinear mesh geometry. import basix.ufl from ufl import (Coefficient, FunctionSpace, Mesh, TestFunction, TrialFunction, dx, grad, inner) coords = basix.ufl.element("P", "triangle", 2, shape=(2, )) mesh = Mesh(coords) dx = dx(mesh) element = basix.ufl.element("P", mesh.ufl_cell().cellname(), 2) space = FunctionSpace(mesh, element) u = TrialFunction(space) v = TestFunction(space) f = Coefficient(space) a = inner(grad(u), grad(v)) * dx L = f * v * dx ffcx-0.7.0/demo/ProjectionManifold.py000066400000000000000000000022561450721277100175430ustar00rootroot00000000000000# Copyright (C) 2012 Marie E. Rognes and David Ham # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see . # # This demo illustrates use of finite element spaces defined over # simplicies embedded in higher dimensions import basix.ufl from ufl import TestFunctions, TrialFunctions, div, dx, inner # Define element over this domain V = basix.ufl.element("RT", "triangle", 1, gdim=3) Q = basix.ufl.element("DG", "triangle", 0, gdim=3) element = basix.ufl.mixed_element([V, Q]) (u, p) = TrialFunctions(element) (v, q) = TestFunctions(element) a = (inner(u, v) + div(u) * q + div(v) * p) * dx ffcx-0.7.0/demo/ReactionDiffusion.py000066400000000000000000000021231450721277100173610ustar00rootroot00000000000000# Copyright (C) 2009 Anders Logg # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see . # # The bilinear form a(u, v) and linear form L(v) for a simple # reaction-diffusion equation using simplified tuple notation. import basix.ufl from ufl import Coefficient, TestFunction, TrialFunction, dx, grad, inner element = basix.ufl.element("Lagrange", "triangle", 1) u = TrialFunction(element) v = TestFunction(element) f = Coefficient(element) a = (inner(grad(u), grad(v)) + u * v) * dx L = f * v * dx ffcx-0.7.0/demo/SpatialCoordinates.py000066400000000000000000000024361450721277100175450ustar00rootroot00000000000000# Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFCx. 
# # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see . # # The bilinear form a(u, v) and linear form L(v) for # Poisson's equation where spatial coordinates are used to define the source # and boundary flux terms. import basix.ufl from ufl import (SpatialCoordinate, TestFunction, TrialFunction, ds, dx, exp, grad, inner, sin, triangle) element = basix.ufl.element("Lagrange", "triangle", 2) u = TrialFunction(element) v = TestFunction(element) x = SpatialCoordinate(triangle) d_x = x[0] - 0.5 d_y = x[1] - 0.5 f = 10.0 * exp(-(d_x * d_x + d_y * d_y) / 0.02) g = sin(5.0 * x[0]) a = inner(grad(u), grad(v)) * dx L = f * v * dx + g * v * ds ffcx-0.7.0/demo/StabilisedStokes.py000066400000000000000000000025721450721277100172320ustar00rootroot00000000000000# Copyright (c) 2005-2007 Anders Logg # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see . # # The bilinear form a(u, v) and Linear form L(v) for the Stokes # equations using a mixed formulation (equal-order stabilized). import basix.ufl from ufl import (Coefficient, TestFunctions, TrialFunctions, div, dot, dx, grad, inner) vector = basix.ufl.element("Lagrange", "triangle", 1, shape=(2, )) scalar = basix.ufl.element("Lagrange", "triangle", 1) system = basix.ufl.mixed_element([vector, scalar]) (u, p) = TrialFunctions(system) (v, q) = TestFunctions(system) f = Coefficient(vector) h = Coefficient(scalar) beta = 0.2 delta = beta * h * h a = (inner(grad(u), grad(v)) - div(v) * p + div(u) * q + delta * dot(grad(p), grad(q))) * dx L = dot(f, v + delta * grad(q)) * dx ffcx-0.7.0/demo/Symmetry.py000066400000000000000000000003451450721277100156030ustar00rootroot00000000000000import basix.ufl from ufl import TestFunction, TrialFunction, dx, grad, inner P1 = basix.ufl.element("P", "triangle", 1, shape=(2, 2), symmetry=True) u = TrialFunction(P1) v = TestFunction(P1) a = inner(grad(u), grad(v)) * dx ffcx-0.7.0/demo/TraceElement.py000066400000000000000000000015431450721277100163230ustar00rootroot00000000000000# Copyright (C) 2015 Marie E. Rognes # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see . import basix.ufl from ufl import TestFunction, avg, ds, dS element = basix.ufl.element("HDiv Trace", "triangle", 0) v = TestFunction(element) L = v * ds + avg(v) * dS ffcx-0.7.0/demo/VectorConstant.py000066400000000000000000000026071450721277100167310ustar00rootroot00000000000000# Copyright (C) 2016 Jan Blechta # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see . # # The bilinear form a(u, v) and linear form L(v) for # Poisson's equation using bilinear elements on bilinear mesh geometry. import basix.ufl from ufl import (Constant, Coefficient, FunctionSpace, Mesh, TestFunction, TrialFunction, dx, grad, inner) coords = basix.ufl.element("P", "triangle", 2, shape=(2, )) mesh = Mesh(coords) dx = dx(mesh) element = basix.ufl.element("P", mesh.ufl_cell().cellname(), 2) space = FunctionSpace(mesh, element) u = TrialFunction(space) v = TestFunction(space) f = Coefficient(space) L = f * v * dx mu = Constant(mesh, shape=(3,)) theta = - (mu[1] - 2) / mu[0] - (2 * (2 * mu[0] - 2) * (mu[0] - 1)) / (mu[0] * (mu[1] - 2)) a = theta * inner(grad(u), grad(v)) * dx ffcx-0.7.0/demo/VectorPoisson.py000066400000000000000000000020741450721277100165700ustar00rootroot00000000000000# Copyright (C) 2010 Anders Logg # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see . # # The bilinear form a(u, v) and linear form L(v) for # the vector-valued Poisson's equation. 
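#
# Like the other demos in this directory, this file can be compiled with the
# FFCx command line tool (see demo/test_demos.py), e.g. `ffcx VectorPoisson.py`,
# which generates C code for the forms defined below.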
import basix.ufl from ufl import Coefficient, TestFunction, TrialFunction, dx, grad, inner element = basix.ufl.element("Lagrange", "triangle", 1, shape=(2, )) u = TrialFunction(element) v = TestFunction(element) f = Coefficient(element) a = inner(grad(u), grad(v)) * dx L = inner(f, v) * dx ffcx-0.7.0/demo/test_demos.py000066400000000000000000000020551450721277100161200ustar00rootroot00000000000000import os import sys import pytest demo_dir = os.path.dirname(os.path.realpath(__file__)) ufl_files = [] for file in os.listdir(demo_dir): if file.endswith(".py") and not file == "test_demos.py": ufl_files.append(file[:-3]) @pytest.mark.parametrize("file", ufl_files) def test_demo(file): if file in [ "MixedGradient", "TraceElement", # HDiv Trace "MixedElasticity", # VectorElement of BDM "RestrictedElement", "_TensorProductElement" ]: # Skip demos that use elements not yet implemented in Basix pytest.skip() opts = "" if "Complex" in file: opts = '--scalar_type "double _Complex"' extra_flags = "-Wunused-variable -Werror -fPIC " assert os.system(f"cd {demo_dir} && ffcx {opts} {file}.py") == 0 assert os.system(f"cd {demo_dir} && " "CPATH=../ffcx/codegeneration/ " f"gcc -I/usr/include/python{sys.version_info.major}.{sys.version_info.minor} {extra_flags}" f"-shared {file}.c -o {file}.so") == 0 ffcx-0.7.0/doc/000077500000000000000000000000001450721277100132175ustar00rootroot00000000000000ffcx-0.7.0/doc/Makefile000066400000000000000000000011541450721277100146600ustar00rootroot00000000000000# Minimal makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build SPHINXPROJ = FEniCSFormCompilerX SOURCEDIR = source BUILDDIR = build # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)ffcx-0.7.0/doc/source/000077500000000000000000000000001450721277100145175ustar00rootroot00000000000000ffcx-0.7.0/doc/source/conf.py000066400000000000000000000131111450721277100160130ustar00rootroot00000000000000# Configuration file for the Sphinx documentation builder. # # This file does only contain a selection of the most common options. For a # full list see the documentation: # http://www.sphinx-doc.org/en/stable/config # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # import os # import sys # sys.path.insert(0, os.path.abspath('.')) import datetime import ffcx # -- Project information ----------------------------------------------------- project = 'FEniCS Form Compiler X' now = datetime.datetime.now() date = now.date() copyright = f'{date.year}, FEniCS Project' author = 'FEniCS Project' # The short X.Y version version = ffcx.__version__ # The full version, including alpha/beta/rc tags release = ffcx.__version__ # -- General configuration --------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. 
They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.mathjax', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path . exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # # html_theme = 'alabaster' html_theme = "sphinx_rtd_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # The default sidebars (for documents that don't match any pattern) are # defined by theme itself. Builtin themes are using these templates by # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', # 'searchbox.html']``. # # html_sidebars = {} # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. htmlhelp_basename = 'FEniCSFormCompilerXdoc' # -- Options for LaTeX output ------------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'FEniCSFormCompilerX.tex', 'FEniCS Form Compiler X Documentation', 'FEniCS Project', 'manual'), ] # -- Options for manual page output ------------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'fenicsformcompilerx', 'FEniCS Form Compiler X Documentation', [author], 1) ] # -- Options for Texinfo output ---------------------------------------------- # Grouping the document tree into Texinfo files. 
List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'FEniCSFormCompilerX', 'FEniCS Form Compiler X Documentation', author, 'FEniCSFormCompilerX', 'One line description of project.', 'Miscellaneous'), ] # -- Extension configuration ------------------------------------------------- # -- Options for todo extension ---------------------------------------------- # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True autodoc_default_options = {'members': True, 'show-inheritance': True, 'imported-members': True, 'undoc-members': True} autosummary_generate = True autoclass_content = "both" autodoc_default_flags = ['members', 'show-inheritance'] napoleon_numpy_docstring = True napoleon_google_docstring = True ffcx-0.7.0/doc/source/index.rst000066400000000000000000000012011450721277100163520ustar00rootroot00000000000000FEniCS Form Compiler 'X' documentation ====================================== This is an experimental version of the FEniCS Form Compiler. It is developed at https://github.com/FEniCS/ffcx. .. toctree:: :maxdepth: 2 :caption: Contents: API reference ============= .. autosummary:: :toctree: _autogenerated ffcx ffcx.__main__ ffcx.analysis ffcx.compiler ffcx.element_interface ffcx.formatting ffcx.main ffcx.naming ffcx.codegeneration ffcx.options ffcx.ir.representation ffcx.ir.representationutils Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` ffcx-0.7.0/ffcx/000077500000000000000000000000001450721277100134005ustar00rootroot00000000000000ffcx-0.7.0/ffcx/__init__.py000066400000000000000000000010151450721277100155060ustar00rootroot00000000000000# Copyright (C) 2009-2018 FEniCS Project # # This file is part of FFCx. (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """FEniCS Form Compiler (FFCx). FFCx compiles finite element variational forms into C code. """ import importlib.metadata import logging # Import default options from ffcx.options import get_options # noqa: F401 __version__ = importlib.metadata.version("fenics-ffcx") logging.basicConfig() logger = logging.getLogger("ffcx") logging.captureWarnings(capture=True) ffcx-0.7.0/ffcx/__main__.py000066400000000000000000000004211450721277100154700ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (C) 2017-2017 Martin Sandve Alnæs # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later from ffcx.main import main if __name__ == "__main__": import sys sys.exit(main()) ffcx-0.7.0/ffcx/analysis.py000066400000000000000000000226151450721277100156030ustar00rootroot00000000000000# Copyright (C) 2007-2020 Anders Logg, Martin Alnaes, Kristian B. Oelgaard, # Michal Habera and others # # This file is part of FFCx. (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Compiler stage 1: Analysis. This module implements the analysis/preprocessing of variational forms, including automatic selection of elements, degrees and form representation type. """ import logging import typing from warnings import warn import numpy as np import numpy.typing as npt import basix.ufl import ufl from ffcx.element_interface import convert_element logger = logging.getLogger("ffcx") class UFLData(typing.NamedTuple): form_data: typing.Tuple[ufl.algorithms.formdata.FormData, ...]
# Tuple of ufl form data unique_elements: typing.List[basix.ufl._ElementBase] # List of unique elements # Lookup table from each unique element to its index in `unique_elements` element_numbers: typing.Dict[basix.ufl._ElementBase, int] unique_coordinate_elements: typing.List[basix.ufl._ElementBase] # List of unique coordinate elements # List of ufl Expressions as tuples (expression, points, original_expression) expressions: typing.List[typing.Tuple[ufl.core.expr.Expr, npt.NDArray[np.float64], ufl.core.expr.Expr]] def analyze_ufl_objects(ufl_objects: typing.List, options: typing.Dict) -> UFLData: """Analyze ufl object(s). Options ---------- ufl_objects options FFCx options. These options take priority over all other set options. Returns a data structure holding ------- form_datas Form_data objects unique_elements Unique elements across all forms and expressions element_numbers Mapping to unique numbers for all elements unique_coordinate_elements Unique coordinate elements across all forms and expressions expressions List of all expressions after post-processing, with its evaluation points and the original expression """ logger.info(79 * "*") logger.info("Compiler stage 1: Analyzing UFL objects") logger.info(79 * "*") elements = [] coordinate_elements = [] # Group objects by types forms = [] expressions = [] processed_expressions = [] for ufl_object in ufl_objects: if isinstance(ufl_object, ufl.form.Form): forms.append(ufl_object) elif isinstance(ufl_object, ufl.FiniteElementBase): elements.append(convert_element(ufl_object)) elif isinstance(ufl_object, ufl.Mesh): coordinate_elements.append(convert_element(ufl_object.ufl_coordinate_element())) elif isinstance(ufl_object[0], ufl.core.expr.Expr): original_expression = ufl_object[0] points = np.asarray(ufl_object[1]) expressions.append((original_expression, points)) else: raise TypeError("UFL objects not recognised.") form_data = tuple(_analyze_form(form, options) for form in forms) for data in form_data: elements += [convert_element(e) for e in data.unique_sub_elements] coordinate_elements += [convert_element(e) for e in data.coordinate_elements] for original_expression, points in expressions: elements += [convert_element(e) for e in ufl.algorithms.extract_elements(original_expression)] processed_expression = _analyze_expression(original_expression, options) processed_expressions += [(processed_expression, points, original_expression)] elements += ufl.algorithms.analysis.extract_sub_elements(elements) # Sort elements so sub-elements come before mixed elements unique_elements = ufl.algorithms.sort_elements(set(elements)) unique_coordinate_element_list = sorted(set(coordinate_elements), key=lambda x: repr(x)) for e in unique_elements: assert isinstance(e, basix.ufl._ElementBase) # Compute dict (map) from element to index element_numbers = {element: i for i, element in enumerate(unique_elements)} return UFLData(form_data=form_data, unique_elements=unique_elements, element_numbers=element_numbers, unique_coordinate_elements=unique_coordinate_element_list, expressions=processed_expressions) def _analyze_expression(expression: ufl.core.expr.Expr, options: typing.Dict): """Analyzes and preprocesses expressions.""" preserve_geometry_types = (ufl.classes.Jacobian, ) expression = ufl.algorithms.apply_algebra_lowering.apply_algebra_lowering(expression) expression = ufl.algorithms.apply_derivatives.apply_derivatives(expression) expression = ufl.algorithms.apply_function_pullbacks.apply_function_pullbacks(expression) expression = 
ufl.algorithms.apply_geometry_lowering.apply_geometry_lowering(expression, preserve_geometry_types) expression = ufl.algorithms.apply_derivatives.apply_derivatives(expression) expression = ufl.algorithms.apply_geometry_lowering.apply_geometry_lowering(expression, preserve_geometry_types) expression = ufl.algorithms.apply_derivatives.apply_derivatives(expression) complex_mode = "_Complex" in options["scalar_type"] if not complex_mode: expression = ufl.algorithms.remove_complex_nodes.remove_complex_nodes(expression) return expression def _analyze_form(form: ufl.form.Form, options: typing.Dict) -> ufl.algorithms.formdata.FormData: """Analyzes UFL form and attaches metadata. Args: form: forms options: options Returns: Form data computed by UFL with metadata attached Note: The main workload of this function is extraction of unique/default metadata from options, integral metadata or inherited from UFL (in case of quadrature degree). """ if form.empty(): raise RuntimeError(f"Form ({form}) seems to be zero: cannot compile it.") if _has_custom_integrals(form): raise RuntimeError(f"Form ({form}) contains unsupported custom integrals.") # Set default spacing for coordinate elements to be equispaced for n, i in enumerate(form._integrals): element = i._ufl_domain._ufl_coordinate_element if not isinstance(element, basix.ufl._ElementBase) and element.degree() > 2: warn("UFL coordinate elements using elements not created via Basix may not work with DOLFINx") # Check for complex mode complex_mode = "_Complex" in options["scalar_type"] # Compute form metadata form_data = ufl.algorithms.compute_form_data( form, do_apply_function_pullbacks=True, do_apply_integral_scaling=True, do_apply_geometry_lowering=True, preserve_geometry_types=(ufl.classes.Jacobian,), do_apply_restrictions=True, do_append_everywhere_integrals=False, # do not add dx integrals to dx(i) in UFL complex_mode=complex_mode) # If form contains a quadrature element, use the custom quadrature scheme custom_q = None for e in form_data.unique_elements: e = convert_element(e) if e.has_custom_quadrature: if custom_q is None: custom_q = e.custom_quadrature() else: assert np.allclose(e._points, custom_q[0]) assert np.allclose(e._weights, custom_q[1]) # Determine unique quadrature degree and quadrature scheme # per each integral data for id, integral_data in enumerate(form_data.integral_data): # Iterate through groups of integral data. There is one integral # data for all integrals with same domain, itype, subdomain_id # (but possibly different metadata). # # Quadrature degree and quadrature scheme must be the same for # all integrals in this integral data group, i.e. 
must be the # same for the same (domain, itype, subdomain_id) qd_default = -1 qr_default = "default" for i, integral in enumerate(integral_data.integrals): metadata = integral.metadata() if custom_q is None: # Extract quadrature degree qd_metadata = integral.metadata().get("quadrature_degree", qd_default) pd_estimated = np.max(integral.metadata()["estimated_polynomial_degree"]) if qd_metadata != qd_default: qd = qd_metadata else: qd = pd_estimated # Extract quadrature rule qr = integral.metadata().get("quadrature_rule", qr_default) logger.info(f"Integral {i}, integral group {id}:") logger.info(f"--- quadrature rule: {qr}") logger.info(f"--- quadrature degree: {qd}") metadata.update({"quadrature_degree": qd, "quadrature_rule": qr}) else: metadata.update({"quadrature_points": custom_q[0], "quadrature_weights": custom_q[1], "quadrature_rule": "custom"}) integral_data.integrals[i] = integral.reconstruct(metadata=metadata) return form_data def _has_custom_integrals(o: typing.Union[ufl.integral.Integral, ufl.classes.Form, list, tuple]) -> bool: """Check for custom integrals.""" if isinstance(o, ufl.integral.Integral): return o.integral_type() in ufl.custom_integral_types elif isinstance(o, ufl.classes.Form): return any(_has_custom_integrals(itg) for itg in o.integrals()) elif isinstance(o, (list, tuple)): return any(_has_custom_integrals(itg) for itg in o) else: raise NotImplementedError ffcx-0.7.0/ffcx/codegeneration/000077500000000000000000000000001450721277100163665ustar00rootroot00000000000000ffcx-0.7.0/ffcx/codegeneration/C/000077500000000000000000000000001450721277100165505ustar00rootroot00000000000000ffcx-0.7.0/ffcx/codegeneration/C/__init__.py000066400000000000000000000000001450721277100206470ustar00rootroot00000000000000ffcx-0.7.0/ffcx/codegeneration/C/basix_custom_element_template.py000066400000000000000000000017031450721277100252270ustar00rootroot00000000000000# Code generation format strings for UFC (Unified Form-assembly Code) # This code is released into the public domain. # # The FEniCS Project (http://www.fenicsproject.org/) 2018. factory = """ // Code for custom element {factory_name} {value_shape_init} {wcoeffs_init} {npts_init} {ndofs_init} {x_init} {M_init} ufcx_basix_custom_finite_element {factory_name} = {{ .cell_type = {cell_type}, .value_shape_length = {value_shape_length}, .value_shape = {value_shape}, .wcoeffs_rows = {wcoeffs_rows}, .wcoeffs_cols = {wcoeffs_cols}, .wcoeffs = {wcoeffs}, .npts = {npts}, .ndofs = {ndofs}, .x = {x}, .M = {M}, .map_type = {map_type}, .sobolev_space = {sobolev_space}, .discontinuous = {discontinuous}, .highest_complete_degree = {highest_complete_degree}, .interpolation_nderivs = {interpolation_nderivs}, .highest_degree = {highest_degree}, .polyset_type = {polyset_type} }}; // End of code for custom element {factory_name} """ ffcx-0.7.0/ffcx/codegeneration/C/c_implementation.py000066400000000000000000000235421450721277100224570ustar00rootroot00000000000000# Copyright (C) 2023 Chris Richardson # # This file is part of FFCx. 
(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later import warnings import ffcx.codegeneration.lnodes as L from ffcx.codegeneration.utils import scalar_to_value_type math_table = { "double": { "sqrt": "sqrt", "abs": "fabs", "cos": "cos", "sin": "sin", "tan": "tan", "acos": "acos", "asin": "asin", "atan": "atan", "cosh": "cosh", "sinh": "sinh", "tanh": "tanh", "acosh": "acosh", "asinh": "asinh", "atanh": "atanh", "power": "pow", "exp": "exp", "ln": "log", "erf": "erf", "atan_2": "atan2", "min_value": "fmin", "max_value": "fmax", "bessel_y": "yn", "bessel_j": "jn", }, "float": { "sqrt": "sqrtf", "abs": "fabsf", "cos": "cosf", "sin": "sinf", "tan": "tanf", "acos": "acosf", "asin": "asinf", "atan": "atanf", "cosh": "coshf", "sinh": "sinhf", "tanh": "tanhf", "acosh": "acoshf", "asinh": "asinhf", "atanh": "atanhf", "power": "powf", "exp": "expf", "ln": "logf", "erf": "erff", "atan_2": "atan2f", "min_value": "fminf", "max_value": "fmaxf", "bessel_y": "yn", "bessel_j": "jn", }, "long double": { "sqrt": "sqrtl", "abs": "fabsl", "cos": "cosl", "sin": "sinl", "tan": "tanl", "acos": "acosl", "asin": "asinl", "atan": "atanl", "cosh": "coshl", "sinh": "sinhl", "tanh": "tanhl", "acosh": "acoshl", "asinh": "asinhl", "atanh": "atanhl", "power": "powl", "exp": "expl", "ln": "logl", "erf": "erfl", "atan_2": "atan2l", "min_value": "fminl", "max_value": "fmaxl", }, "double _Complex": { "sqrt": "csqrt", "abs": "cabs", "cos": "ccos", "sin": "csin", "tan": "ctan", "acos": "cacos", "asin": "casin", "atan": "catan", "cosh": "ccosh", "sinh": "csinh", "tanh": "ctanh", "acosh": "cacosh", "asinh": "casinh", "atanh": "catanh", "power": "cpow", "exp": "cexp", "ln": "clog", "real": "creal", "imag": "cimag", "conj": "conj", "max_value": "fmax", "min_value": "fmin", "bessel_y": "yn", "bessel_j": "jn", }, "float _Complex": { "sqrt": "csqrtf", "abs": "cabsf", "cos": "ccosf", "sin": "csinf", "tan": "ctanf", "acos": "cacosf", "asin": "casinf", "atan": "catanf", "cosh": "ccoshf", "sinh": "csinhf", "tanh": "ctanhf", "acosh": "cacoshf", "asinh": "casinhf", "atanh": "catanhf", "power": "cpowf", "exp": "cexpf", "ln": "clogf", "real": "crealf", "imag": "cimagf", "conj": "conjf", "max_value": "fmaxf", "min_value": "fminf", "bessel_y": "yn", "bessel_j": "jn", }, } class CFormatter(object): def __init__(self, scalar) -> None: self.scalar_type = scalar self.real_type = scalar_to_value_type(scalar) def _dtype_to_name(self, dtype): if dtype == L.DataType.SCALAR: return self.scalar_type if dtype == L.DataType.REAL: return self.real_type if dtype == L.DataType.INT: return "int" raise ValueError(f"Invalid dtype: {dtype}") def _format_number(self, x): # Use 16sf for precision (good for float64 or less) if isinstance(x, complex): return f"({x.real:.16}+I*{x.imag:.16})" elif isinstance(x, float): return f"{x:.16}" return str(x) def _build_initializer_lists(self, values): arr = "{" if len(values.shape) == 1: arr += ", ".join(self._format_number(v) for v in values) elif len(values.shape) > 1: arr += ",\n ".join(self._build_initializer_lists(v) for v in values) arr += "}" return arr def format_statement_list(self, slist) -> str: return "".join(self.c_format(s) for s in slist.statements) def format_comment(self, c) -> str: return "// " + c.comment + "\n" def format_array_decl(self, arr) -> str: dtype = arr.symbol.dtype typename = self._dtype_to_name(dtype) symbol = self.c_format(arr.symbol) dims = "".join([f"[{i}]" for i in arr.sizes]) if arr.values is None: assert arr.const is False return f"{typename} {symbol}{dims};\n" 
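# Otherwise the array carries initial values: build a C initializer list.
# For example (hypothetical input), a REAL symbol "A" with sizes=[2],
# values=[1.0, 2.0] and const=True renders as
# "static const double A[2] = {1.0, 2.0};" (assuming real_type is "double").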
vals = self._build_initializer_lists(arr.values) cstr = "static const " if arr.const else "" return f"{cstr}{typename} {symbol}{dims} = {vals};\n" def format_array_access(self, arr) -> str: name = self.c_format(arr.array) indices = f"[{']['.join(self.c_format(i) for i in arr.indices)}]" return f"{name}{indices}" def format_variable_decl(self, v) -> str: val = self.c_format(v.value) symbol = self.c_format(v.symbol) typename = self._dtype_to_name(v.symbol.dtype) return f"{typename} {symbol} = {val};\n" def format_nary_op(self, oper) -> str: # Format children args = [self.c_format(arg) for arg in oper.args] # Apply parentheses for i in range(len(args)): if oper.args[i].precedence >= oper.precedence: args[i] = "(" + args[i] + ")" # Return combined string return f" {oper.op} ".join(args) def format_binary_op(self, oper) -> str: # Format children lhs = self.c_format(oper.lhs) rhs = self.c_format(oper.rhs) # Apply parentheses if oper.lhs.precedence >= oper.precedence: lhs = f"({lhs})" if oper.rhs.precedence >= oper.precedence: rhs = f"({rhs})" # Return combined string return f"{lhs} {oper.op} {rhs}" def format_unary_op(self, oper) -> str: arg = self.c_format(oper.arg) if oper.arg.precedence >= oper.precedence: return f"{oper.op}({arg})" return f"{oper.op}{arg}" def format_literal_float(self, val) -> str: value = self._format_number(val.value) return f"{value}" def format_literal_int(self, val) -> str: return f"{val.value}" def format_for_range(self, r) -> str: begin = self.c_format(r.begin) end = self.c_format(r.end) index = self.c_format(r.index) output = f"for (int {index} = {begin}; {index} < {end}; ++{index})\n" output += "{\n" body = self.c_format(r.body) for line in body.split("\n"): if len(line) > 0: output += f" {line}\n" output += "}\n" return output def format_statement(self, s) -> str: return self.c_format(s.expr) def format_assign(self, expr) -> str: rhs = self.c_format(expr.rhs) lhs = self.c_format(expr.lhs) return f"{lhs} {expr.op} {rhs};\n" def format_conditional(self, s) -> str: # Format children c = self.c_format(s.condition) t = self.c_format(s.true) f = self.c_format(s.false) # Apply parentheses if s.condition.precedence >= s.precedence: c = "(" + c + ")" if s.true.precedence >= s.precedence: t = "(" + t + ")" if s.false.precedence >= s.precedence: f = "(" + f + ")" # Return combined string return c + " ? 
" + t + " : " + f def format_symbol(self, s) -> str: return f"{s.name}" def format_multi_index(self, mi) -> str: return self.c_format(mi.global_index) def format_math_function(self, c) -> str: # Get a table of functions for this type, if available arg_type = self.scalar_type if hasattr(c.args[0], "dtype"): if c.args[0].dtype == L.DataType.REAL: arg_type = self.real_type else: warnings.warn(f"Syntax item without dtype {c.args[0]}") dtype_math_table = math_table.get(arg_type, {}) # Get a function from the table, if available, else just use bare name func = dtype_math_table.get(c.function, c.function) args = ", ".join(self.c_format(arg) for arg in c.args) return f"{func}({args})" c_impl = { "StatementList": format_statement_list, "Comment": format_comment, "ArrayDecl": format_array_decl, "ArrayAccess": format_array_access, "MultiIndex": format_multi_index, "VariableDecl": format_variable_decl, "ForRange": format_for_range, "Statement": format_statement, "Assign": format_assign, "AssignAdd": format_assign, "Product": format_nary_op, "Neg": format_unary_op, "Sum": format_nary_op, "Add": format_binary_op, "Sub": format_binary_op, "Mul": format_binary_op, "Div": format_binary_op, "Not": format_unary_op, "LiteralFloat": format_literal_float, "LiteralInt": format_literal_int, "Symbol": format_symbol, "Conditional": format_conditional, "MathFunction": format_math_function, "And": format_binary_op, "Or": format_binary_op, "NE": format_binary_op, "EQ": format_binary_op, "GE": format_binary_op, "LE": format_binary_op, "GT": format_binary_op, "LT": format_binary_op, } def c_format(self, s) -> str: name = s.__class__.__name__ try: return self.c_impl[name](self, s) except KeyError: raise RuntimeError("Unknown statement: ", name) ffcx-0.7.0/ffcx/codegeneration/C/dofmap.py000066400000000000000000000070301450721277100203700ustar00rootroot00000000000000# Copyright (C) 2009-2018 Anders Logg, Martin Sandve Alnæs and Garth N. 
Wells # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later # # Note: Most of the code in this file is a direct translation from the # old implementation in FFC import logging import ffcx.codegeneration.C.dofmap_template as ufcx_dofmap logger = logging.getLogger("ffcx") def generator(ir, options): """Generate UFC code for a dofmap.""" logger.info("Generating code for dofmap:") logger.info(f"--- num element support dofs: {ir.num_element_support_dofs}") logger.info(f"--- name: {ir.name}") d = {} # Attributes d["factory_name"] = ir.name d["signature"] = f'"{ir.signature}"' d["num_global_support_dofs"] = ir.num_global_support_dofs d["num_element_support_dofs"] = ir.num_element_support_dofs d["num_sub_dofmaps"] = ir.num_sub_dofmaps flattened_entity_dofs = [] entity_dof_offsets = [0] for dim in ir.entity_dofs: for ent in dim: for v in ent: flattened_entity_dofs.append(v) entity_dof_offsets.append(len(flattened_entity_dofs)) d["entity_dofs"] = f"entity_dofs_{ir.name}" values = ", ".join(str(i) for i in flattened_entity_dofs) sizes = len(flattened_entity_dofs) d["entity_dofs_init"] = f"int entity_dofs_{ir.name}[{sizes}] = {{{values}}};" d["entity_dof_offsets"] = f"entity_dof_offsets_{ir.name}" values = ", ".join(str(i) for i in entity_dof_offsets) sizes = len(entity_dof_offsets) d["entity_dof_offsets_init"] = f"int entity_dof_offsets_{ir.name}[{sizes}] = {{{values}}};" # Closure flattened_entity_closure_dofs = [] entity_closure_dof_offsets = [0] for dim in ir.entity_closure_dofs: for ent in dim: for v in ent: flattened_entity_closure_dofs.append(v) entity_closure_dof_offsets.append(len(flattened_entity_closure_dofs)) d["entity_closure_dofs"] = f"entity_closure_dofs_{ir.name}" values = ", ".join(str(i) for i in flattened_entity_closure_dofs) sizes = len(flattened_entity_closure_dofs) d["entity_closure_dofs_init"] = f"int entity_closure_dofs_{ir.name}[{sizes}] = {{{values}}};" d["entity_closure_dof_offsets"] = f"entity_closure_dof_offsets_{ir.name}" values = ", ".join(str(i) for i in entity_closure_dof_offsets) sizes = len(entity_closure_dof_offsets) d["entity_closure_dof_offsets_init"] = f"int entity_closure_dof_offsets_{ir.name}[{sizes}] = {{{values}}};" d["block_size"] = ir.block_size if len(ir.sub_dofmaps) > 0: values = ", ".join(f"&{dofmap}" for dofmap in ir.sub_dofmaps) sizes = len(ir.sub_dofmaps) d["sub_dofmaps_initialization"] = f"ufcx_dofmap* sub_dofmaps_{ir.name}[{sizes}] = {{{values}}};" d["sub_dofmaps"] = f"sub_dofmaps_{ir.name}" else: d["sub_dofmaps_initialization"] = "" d["sub_dofmaps"] = "NULL" # Check that no keys are redundant or have been missed from string import Formatter fields = [ fname for _, fname, _, _ in Formatter().parse(ufcx_dofmap.factory) if fname ] # Remove square brackets from any field names fields = [f.split("[")[0] for f in fields] assert set(fields) == set( d.keys() ), "Mismatch between keys in template and in formatting dict." # Format implementation code implementation = ufcx_dofmap.factory.format_map(d) # Format declaration declaration = ufcx_dofmap.declaration.format(factory_name=ir.name) return declaration, implementation ffcx-0.7.0/ffcx/codegeneration/C/dofmap_template.py000066400000000000000000000016541450721277100222710ustar00rootroot00000000000000# Code generation format strings for UFC (Unified Form-assembly Code) # This code is released into the public domain. # # The FEniCS Project (http://www.fenicsproject.org/) 2018. 
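# The {name} fields in the strings below are str.format placeholders filled in
# by generator() in dofmap.py. For illustration (hypothetical name), a dofmap
# named "dofmap_a" yields "extern ufcx_dofmap dofmap_a;" from the declaration
# template.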
declaration = """ extern ufcx_dofmap {factory_name}; """ factory = """ // Code for dofmap {factory_name} {sub_dofmaps_initialization} {entity_dofs_init} {entity_dof_offsets_init} {entity_closure_dofs_init} {entity_closure_dof_offsets_init} ufcx_dofmap {factory_name} = {{ .signature = {signature}, .num_global_support_dofs = {num_global_support_dofs}, .num_element_support_dofs = {num_element_support_dofs}, .block_size = {block_size}, .entity_dofs = {entity_dofs}, .entity_dof_offsets = {entity_dof_offsets}, .entity_closure_dofs = {entity_closure_dofs}, .entity_closure_dof_offsets = {entity_closure_dof_offsets}, .num_sub_dofmaps = {num_sub_dofmaps}, .sub_dofmaps = {sub_dofmaps} }}; // End of code for dofmap {factory_name} """ ffcx-0.7.0/ffcx/codegeneration/C/expressions.py000066400000000000000000000120661450721277100215110ustar00rootroot00000000000000# Copyright (C) 2019 Michal Habera # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later import logging from ffcx.codegeneration.C import expressions_template from ffcx.codegeneration.expression_generator import ExpressionGenerator from ffcx.codegeneration.backend import FFCXBackend from ffcx.codegeneration.C.c_implementation import CFormatter from ffcx.codegeneration.utils import cdtype_to_numpy, scalar_to_value_type logger = logging.getLogger("ffcx") def generator(ir, options): """Generate UFC code for an expression.""" logger.info("Generating code for expression:") logger.info(f"--- points: {ir.points}") logger.info(f"--- name: {ir.name}") factory_name = ir.name # Format declaration declaration = expressions_template.declaration.format( factory_name=factory_name, name_from_uflfile=ir.name_from_uflfile) backend = FFCXBackend(ir, options) eg = ExpressionGenerator(ir, backend) d = {} d["name_from_uflfile"] = ir.name_from_uflfile d["factory_name"] = ir.name parts = eg.generate() CF = CFormatter(options["scalar_type"]) d["tabulate_expression"] = CF.c_format(parts) if len(ir.original_coefficient_positions) > 0: d["original_coefficient_positions"] = f"original_coefficient_positions_{ir.name}" values = ", ".join(str(i) for i in ir.original_coefficient_positions) sizes = len(ir.original_coefficient_positions) d["original_coefficient_positions_init"] = \ f"static int original_coefficient_positions_{ir.name}[{sizes}] = {{{values}}};" else: d["original_coefficient_positions"] = "NULL" d["original_coefficient_positions_init"] = "" values = ", ".join(str(p) for p in ir.points.flatten()) sizes = ir.points.size d["points_init"] = f"static double points_{ir.name}[{sizes}] = {{{values}}};" d["points"] = f"points_{ir.name}" if len(ir.expression_shape) > 0: values = ", ".join(str(i) for i in ir.expression_shape) sizes = len(ir.expression_shape) d["value_shape_init"] = f"static int value_shape_{ir.name}[{sizes}] = {{{values}}};" d["value_shape"] = f"value_shape_{ir.name}" else: d["value_shape_init"] = "" d["value_shape"] = "NULL" d["num_components"] = len(ir.expression_shape) d["num_coefficients"] = len(ir.coefficient_numbering) d["num_constants"] = len(ir.constant_names) d["num_points"] = ir.points.shape[0] d["topological_dimension"] = ir.points.shape[1] d["scalar_type"] = options["scalar_type"] d["geom_type"] = scalar_to_value_type(options["scalar_type"]) d["np_scalar_type"] = cdtype_to_numpy(options["scalar_type"]) d["rank"] = len(ir.tensor_shape) if len(ir.coefficient_names) > 0: values = ", ".join(f'"{name}"' for name in ir.coefficient_names) sizes = len(ir.coefficient_names) d["coefficient_names_init"] = 
f"static const char* coefficient_names_{ir.name}[{sizes}] = {{{values}}};" d["coefficient_names"] = f"coefficient_names_{ir.name}" else: d["coefficient_names_init"] = "" d["coefficient_names"] = "NULL" if len(ir.constant_names) > 0: values = ", ".join(f'"{name}"' for name in ir.constant_names) sizes = len(ir.constant_names) d["constant_names_init"] = f"static const char* constant_names_{ir.name}[{sizes}] = {{{values}}};" d["constant_names"] = f"constant_names_{ir.name}" else: d["constant_names_init"] = "" d["constant_names"] = "NULL" code = [] # FIXME: Should be handled differently, revise how # ufcx_function_space is generated (also for ufcx_form) for (name, (element, dofmap, cmap_family, cmap_degree)) in ir.function_spaces.items(): code += [f"static ufcx_function_space function_space_{name}_{ir.name_from_uflfile} ="] code += ["{"] code += [f".finite_element = &{element},"] code += [f".dofmap = &{dofmap},"] code += [f".geometry_family = \"{cmap_family}\","] code += [f".geometry_degree = {cmap_degree}"] code += ["};"] d["function_spaces_alloc"] = "\n".join(code) d["function_spaces"] = "" if len(ir.function_spaces) > 0: d["function_spaces"] = f"function_spaces_{ir.name}" values = ", ".join(f"&function_space_{name}_{ir.name_from_uflfile}" for (name, _) in ir.function_spaces.items()) sizes = len(ir.function_spaces) d["function_spaces_init"] = f"ufcx_function_space* function_spaces_{ir.name}[{sizes}] = {{{values}}};" else: d["function_spaces"] = "NULL" d["function_spaces_init"] = "" # Check that no keys are redundant or have been missed from string import Formatter fields = [fname for _, fname, _, _ in Formatter().parse(expressions_template.factory) if fname] assert set(fields) == set(d.keys()), "Mismatch between keys in template and in formatting dict" # Format implementation code implementation = expressions_template.factory.format_map(d) return declaration, implementation ffcx-0.7.0/ffcx/codegeneration/C/expressions_template.py000066400000000000000000000033721450721277100234040ustar00rootroot00000000000000# Copyright (C) 2019 Michal Habera # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later declaration = """ extern ufcx_expression {factory_name}; // Helper used to create expression using name which was given to the // expression in the UFL file. // This helper is called in user c++ code. 
// extern ufcx_expression* {name_from_uflfile}; """ factory = """ // Code for expression {factory_name} void tabulate_tensor_{factory_name}({scalar_type}* restrict A, const {scalar_type}* restrict w, const {scalar_type}* restrict c, const {geom_type}* restrict coordinate_dofs, const int* restrict entity_local_index, const uint8_t* restrict quadrature_permutation) {{ {tabulate_expression} }} {points_init} {value_shape_init} {original_coefficient_positions_init} {function_spaces_alloc} {function_spaces_init} {coefficient_names_init} {constant_names_init} ufcx_expression {factory_name} = {{ .tabulate_tensor_{np_scalar_type} = tabulate_tensor_{factory_name}, .num_coefficients = {num_coefficients}, .num_constants = {num_constants}, .original_coefficient_positions = {original_coefficient_positions}, .coefficient_names = {coefficient_names}, .constant_names = {constant_names}, .num_points = {num_points}, .topological_dimension = {topological_dimension}, .points = {points}, .value_shape = {value_shape}, .num_components = {num_components}, .rank = {rank}, .function_spaces = {function_spaces} }}; // Alias name ufcx_expression* {name_from_uflfile} = &{factory_name}; // End of code for expression {factory_name} """ ffcx-0.7.0/ffcx/codegeneration/C/file.py000066400000000000000000000025451450721277100200470ustar00rootroot00000000000000# Copyright (C) 2009-2018 Anders Logg, Martin Sandve Alnæs and Garth N. Wells # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later # # Note: Most of the code in this file is a direct translation from the # old implementation in FFC import logging import pprint import textwrap from ffcx.codegeneration.C import file_template from ffcx import __version__ as FFCX_VERSION from ffcx.codegeneration import __version__ as UFC_VERSION logger = logging.getLogger("ffcx") def generator(options): """Generate UFC code for file output.""" logger.info("Generating code for file") # Attributes d = {"ffcx_version": FFCX_VERSION, "ufcx_version": UFC_VERSION} d["options"] = textwrap.indent(pprint.pformat(options), "// ") extra_c_includes = [] if "_Complex" in options["scalar_type"]: extra_c_includes += ["complex.h"] d["extra_c_includes"] = "\n".join( f"#include <{header}>" for header in extra_c_includes ) # Format declaration code code_pre = ( file_template.declaration_pre.format_map(d), file_template.implementation_pre.format_map(d), ) # Format implementation code code_post = ( file_template.declaration_post.format_map(d), file_template.implementation_post.format_map(d), ) return code_pre, code_post ffcx-0.7.0/ffcx/codegeneration/C/file_template.py000066400000000000000000000016431450721277100217400ustar00rootroot00000000000000# Code generation format strings for UFC (Unified Form-assembly Code) # This code is released into the public domain. # # The FEniCS Project (http://www.fenicsproject.org/) 2018. declaration_pre = """ // This code conforms with the UFC specification version {ufcx_version} // and was automatically generated by FFCx version {ffcx_version}. // // This code was generated with the following options: // {options} #pragma once #include <ufcx.h> #ifdef __cplusplus extern "C" {{ #endif """ declaration_post = """ #ifdef __cplusplus }} #endif """ implementation_pre = """ // This code conforms with the UFC specification version {ufcx_version} // and was automatically generated by FFCx version {ffcx_version}. 
// // This code was generated with the following options: // {options} #include <math.h> #include <stdalign.h> #include <stdlib.h> #include <string.h> #include <ufcx.h> {extra_c_includes} """ implementation_post = "" ffcx-0.7.0/ffcx/codegeneration/C/finite_element.py000066400000000000000000000154601450721277100221170ustar00rootroot00000000000000# Copyright (C) 2009-2022 Anders Logg, Martin Sandve Alnæs, Matthew Scroggs # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later # Note: Much of the code in this file is a direct translation # from the old implementation in FFC, although some improvements # have been made to the generated code. import logging import ffcx.codegeneration.C.basix_custom_element_template as ufcx_basix_custom_finite_element import ffcx.codegeneration.C.finite_element_template as ufcx_finite_element import ufl logger = logging.getLogger("ffcx") index_type = "int" def generator(ir, options): """Generate UFC code for a finite element.""" logger.info("Generating code for finite element:") logger.info(f"--- family: {ir.family}") logger.info(f"--- degree: {ir.degree}") logger.info(f"--- value shape: {ir.value_shape}") logger.info(f"--- name: {ir.name}") d = {} d["factory_name"] = ir.name d["signature"] = f"\"{ir.signature}\"" d["geometric_dimension"] = ir.geometric_dimension d["topological_dimension"] = ir.topological_dimension d["cell_shape"] = ir.cell_shape d["element_type"] = ir.element_type d["space_dimension"] = ir.space_dimension d["value_rank"] = len(ir.value_shape) d["value_size"] = ufl.product(ir.value_shape) d["reference_value_rank"] = len(ir.reference_value_shape) d["reference_value_size"] = ufl.product(ir.reference_value_shape) d["degree"] = ir.degree d["family"] = f"\"{ir.family}\"" d["num_sub_elements"] = ir.num_sub_elements d["block_size"] = ir.block_size d["discontinuous"] = "true" if ir.discontinuous else "false" if ir.lagrange_variant is None: d["lagrange_variant"] = -1 else: d["lagrange_variant"] = int(ir.lagrange_variant) if ir.dpc_variant is None: d["dpc_variant"] = -1 else: d["dpc_variant"] = int(ir.dpc_variant) if ir.basix_family is None: d["basix_family"] = -1 else: d["basix_family"] = int(ir.basix_family) if ir.basix_cell is None: d["basix_cell"] = -1 else: d["basix_cell"] = int(ir.basix_cell) if len(ir.value_shape) > 0: d["value_shape"] = f"value_shape_{ir.name}" values = ", ".join(str(i) for i in ir.value_shape) sizes = len(ir.value_shape) d["value_shape_init"] = f"int value_shape_{ir.name}[{sizes}] = {{{values}}};" else: d["value_shape"] = "NULL" d["value_shape_init"] = "" if len(ir.reference_value_shape) > 0: d["reference_value_shape"] = f"reference_value_shape_{ir.name}" values = ", ".join(str(i) for i in ir.reference_value_shape) sizes = len(ir.reference_value_shape) d["reference_value_shape_init"] = f"int reference_value_shape_{ir.name}[{sizes}] = {{{values}}};" else: d["reference_value_shape"] = "NULL" d["reference_value_shape_init"] = "" if len(ir.sub_elements) > 0: d["sub_elements"] = f"sub_elements_{ir.name}" values = ", ".join(f"&{el}" for el in ir.sub_elements) sizes = len(ir.sub_elements) d["sub_elements_init"] = f"ufcx_finite_element* sub_elements_{ir.name}[{sizes}] = {{{values}}};" else: d["sub_elements"] = "NULL" d["sub_elements_init"] = "" if ir.custom_element is not None: d["custom_element"] = f"&custom_element_{ir.name}" d["custom_element_init"] = generate_custom_element(f"custom_element_{ir.name}", ir.custom_element) else: d["custom_element"] = "NULL" d["custom_element_init"] = "" # Check that no keys are redundant or have been 
missed from string import Formatter fieldnames = [ fname for _, fname, _, _ in Formatter().parse(ufcx_finite_element.factory) if fname ] assert set(fieldnames) == set( d.keys()), "Mismatch between keys in template and in formatting dict" # Format implementation code implementation = ufcx_finite_element.factory.format_map(d) # Format declaration declaration = ufcx_finite_element.declaration.format(factory_name=ir.name) return declaration, implementation def generate_custom_element(name, ir): d = {} d["factory_name"] = name d["cell_type"] = int(ir.cell_type) d["polyset_type"] = int(ir.polyset_type) d["map_type"] = int(ir.map_type) d["sobolev_space"] = int(ir.sobolev_space) d["highest_complete_degree"] = ir.highest_complete_degree d["highest_degree"] = ir.highest_degree d["discontinuous"] = "true" if ir.discontinuous else "false" d["interpolation_nderivs"] = ir.interpolation_nderivs d["value_shape_length"] = len(ir.value_shape) if len(ir.value_shape) > 0: d["value_shape"] = f"value_shape_{name}" values = ", ".join(str(i) for i in ir.value_shape) sizes = len(ir.value_shape) d["value_shape_init"] = f"int value_shape_{name}[{sizes}] = {{{values}}};" else: d["value_shape"] = "NULL" d["value_shape_init"] = "" d["wcoeffs_rows"] = ir.wcoeffs.shape[0] d["wcoeffs_cols"] = ir.wcoeffs.shape[1] d["wcoeffs"] = f"wcoeffs_{name}" d["wcoeffs_init"] = f"double wcoeffs_{name}[{ir.wcoeffs.shape[0] * ir.wcoeffs.shape[1]}] = " d["wcoeffs_init"] += "{" + ",".join([f" {i}" for row in ir.wcoeffs for i in row]) + "};" npts = [] x = [] for entity in ir.x: for points in entity: npts.append(points.shape[0]) for row in points: for i in row: x.append(i) d["npts"] = f"npts_{name}" d["npts_init"] = f"int npts_{name}[{len(npts)}] = " d["npts_init"] += "{" + ",".join([f" {i}" for i in npts]) + "};" d["x"] = f"x_{name}" d["x_init"] = f"double x_{name}[{len(x)}] = " d["x_init"] += "{" + ",".join([f" {i}" for i in x]) + "};" ndofs = [] M = [] for entity in ir.M: for mat4d in entity: ndofs.append(mat4d.shape[0]) for mat3d in mat4d: for mat2d in mat3d: for row in mat2d: for i in row: M.append(i) d["ndofs"] = f"ndofs_{name}" d["ndofs_init"] = f"int ndofs_{name}[{len(ndofs)}] = " d["ndofs_init"] += "{" + ",".join([f" {i}" for i in ndofs]) + "};" d["M"] = f"M_{name}" d["M_init"] = f"double M_{name}[{len(M)}] = " d["M_init"] += "{" + ",".join([f" {i}" for i in M]) + "};" # Check that no keys are redundant or have been missed from string import Formatter fieldnames = [ fname for _, fname, _, _ in Formatter().parse(ufcx_basix_custom_finite_element.factory) if fname ] assert set(fieldnames) == set( d.keys()), "Mismatch between keys in template and in formatting dict" # Format implementation code implementation = ufcx_basix_custom_finite_element.factory.format_map(d) return implementation ffcx-0.7.0/ffcx/codegeneration/C/finite_element_template.py000066400000000000000000000024431450721277100240070ustar00rootroot00000000000000# Code generation format strings for UFC (Unified Form-assembly Code) # This code is released into the public domain. # # The FEniCS Project (http://www.fenicsproject.org/) 2018. 
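# As in the other *_template.py modules, the {name} placeholders below are
# filled by generator() in finite_element.py; fields that do not apply to a
# given element are emitted as NULL or -1 (e.g. custom_element for standard
# elements, or lagrange_variant for non-Lagrange families).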
declaration = """ extern ufcx_finite_element {factory_name}; """ factory = """ // Code for element {factory_name} {value_shape_init} {reference_value_shape_init} {sub_elements_init} {custom_element_init} ufcx_finite_element {factory_name} = {{ .signature = {signature}, .cell_shape = {cell_shape}, .element_type = {element_type}, .topological_dimension = {topological_dimension}, .geometric_dimension = {geometric_dimension}, .space_dimension = {space_dimension}, .value_rank = {value_rank}, .value_shape = {value_shape}, .value_size = {value_size}, .reference_value_rank = {reference_value_rank}, .reference_value_shape = {reference_value_shape}, .reference_value_size = {reference_value_size}, .degree = {degree}, .block_size = {block_size}, .family = {family}, .basix_family = {basix_family}, .basix_cell = {basix_cell}, .discontinuous = {discontinuous}, .lagrange_variant = {lagrange_variant}, .dpc_variant = {dpc_variant}, .num_sub_elements = {num_sub_elements}, .sub_elements = {sub_elements}, .custom_element = {custom_element} }}; // End of code for element {factory_name} """ ffcx-0.7.0/ffcx/codegeneration/C/form.py000066400000000000000000000134531450721277100200730ustar00rootroot00000000000000# Copyright (C) 2009-2017 Anders Logg and Martin Sandve Alnæs # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later # # Modified by Chris Richardson and Jørgen S. Dokken 2023 # # Note: Most of the code in this file is a direct translation from the # old implementation in FFC import logging import numpy from ffcx.codegeneration.C import form_template logger = logging.getLogger("ffcx") def generator(ir, options): """Generate UFC code for a form.""" logger.info("Generating code for form:") logger.info(f"--- rank: {ir.rank}") logger.info(f"--- name: {ir.name}") d = {} d["factory_name"] = ir.name d["name_from_uflfile"] = ir.name_from_uflfile d["signature"] = f'"{ir.signature}"' d["rank"] = ir.rank d["num_coefficients"] = ir.num_coefficients d["num_constants"] = ir.num_constants if len(ir.original_coefficient_position) > 0: values = ", ".join(str(i) for i in ir.original_coefficient_position) sizes = len(ir.original_coefficient_position) d["original_coefficient_position_init"] = \ f"int original_coefficient_position_{ir.name}[{sizes}] = {{{values}}};" d["original_coefficient_position"] = f"original_coefficient_position_{ir.name}" else: d["original_coefficient_position_init"] = "" d["original_coefficient_position"] = "NULL" cnames = ir.coefficient_names assert ir.num_coefficients == len(cnames) if len(cnames) == 0: code = ["return NULL;"] else: values = ", ".join(f'"{name}"' for name in cnames) code = [f"static const char* names[{len(cnames)}] = {{{values}}};", "return names;"] d["coefficient_name_map"] = "\n".join(code) cstnames = ir.constant_names if len(cstnames) == 0: code = ["return NULL;"] else: values = ", ".join(f'"{name}"' for name in cstnames) code = [f"static const char* names[{len(cstnames)}] = {{{values}}};", "return names;"] d["constant_name_map"] = "\n".join(code) if len(ir.finite_elements) > 0: d["finite_elements"] = f"finite_elements_{ir.name}" values = ", ".join(f"&{el}" for el in ir.finite_elements) sizes = len(ir.finite_elements) d["finite_elements_init"] = f"ufcx_finite_element* finite_elements_{ir.name}[{sizes}] = {{{values}}};" else: d["finite_elements"] = "NULL" d["finite_elements_init"] = "" if len(ir.dofmaps) > 0: d["dofmaps"] = f"dofmaps_{ir.name}" values = ", ".join(f"&{dofmap}" for dofmap in ir.dofmaps) sizes = len(ir.dofmaps) 
d["dofmaps_init"] = f"ufcx_dofmap* dofmaps_{ir.name}[{sizes}] = {{{values}}};" else: d["dofmaps"] = "NULL" d["dofmaps_init"] = "" integrals = [] integral_ids = [] integral_offsets = [0] # Note: the order of this list is defined by the enum ufcx_integral_type in ufcx.h for itg_type in ("cell", "exterior_facet", "interior_facet"): unsorted_integrals = [] unsorted_ids = [] for name, id in zip(ir.integral_names[itg_type], ir.subdomain_ids[itg_type]): unsorted_integrals += [f"&{name}"] unsorted_ids += [id] id_sort = numpy.argsort(unsorted_ids) integrals += [unsorted_integrals[i] for i in id_sort] integral_ids += [unsorted_ids[i] for i in id_sort] integral_offsets.append(len(integrals)) if len(integrals) > 0: sizes = len(integrals) values = ", ".join(integrals) d["form_integrals_init"] = f"static ufcx_integral* form_integrals_{ir.name}[{sizes}] = {{{values}}};" d["form_integrals"] = f"form_integrals_{ir.name}" sizes = len(integral_ids) values = ", ".join(str(i) for i in integral_ids) d["form_integral_ids_init"] = f"int form_integral_ids_{ir.name}[{sizes}] = {{{values}}};" d["form_integral_ids"] = f"form_integral_ids_{ir.name}" else: d["form_integrals_init"] = "" d["form_integrals"] = "NULL" d["form_integral_ids_init"] = "" d["form_integral_ids"] = "NULL" sizes = len(integral_offsets) values = ", ".join(str(i) for i in integral_offsets) d["form_integral_offsets_init"] = f"int form_integral_offsets_{ir.name}[{sizes}] = {{{values}}};" code = [] # FIXME: Should be handled differently, revise how # ufcx_function_space is generated for name, ( element, dofmap, cmap_family, cmap_degree, cmap_celltype, cmap_variant, ) in ir.function_spaces.items(): code += [f"static ufcx_function_space functionspace_{name} ="] code += ["{"] code += [f".finite_element = &{element},"] code += [f".dofmap = &{dofmap},"] code += [f'.geometry_family = "{cmap_family}",'] code += [f".geometry_degree = {cmap_degree},"] code += [f".geometry_basix_cell = {int(cmap_celltype)},"] code += [f".geometry_basix_variant = {int(cmap_variant)}"] code += ["};"] for name in ir.function_spaces.keys(): code += [f'if (strcmp(function_name, "{name}") == 0) return &functionspace_{name};'] code += ["return NULL;\n"] d["functionspace"] = "\n".join(code) # Check that no keys are redundant or have been missed from string import Formatter fields = [ fname for _, fname, _, _ in Formatter().parse(form_template.factory) if fname ] assert set(fields) == set( d.keys() ), "Mismatch between keys in template and in formatting dict" # Format implementation code implementation = form_template.factory.format_map(d) # Format declaration declaration = form_template.declaration.format( factory_name=d["factory_name"], name_from_uflfile=d["name_from_uflfile"] ) return declaration, implementation ffcx-0.7.0/ffcx/codegeneration/C/form_template.py000066400000000000000000000034071450721277100217640ustar00rootroot00000000000000# Code generation format strings for UFC (Unified Form-assembly Code) # This code is released into the public domain. # # The FEniCS Project (http://www.fenicsproject.org/) 2020. declaration = """ extern ufcx_form {factory_name}; // Helper used to create form using name which was given to the // form in the UFL file. // This helper is called in user c++ code. // extern ufcx_form* {name_from_uflfile}; // Helper used to create function space using function name // i.e. name of the Python variable. 

ffcx-0.7.0/ffcx/codegeneration/C/form_template.py

# Code generation format strings for UFC (Unified Form-assembly Code)
# This code is released into the public domain.
#
# The FEniCS Project (http://www.fenicsproject.org/) 2020.

declaration = """
extern ufcx_form {factory_name};

// Helper used to create form using name which was given to the
// form in the UFL file.
// This helper is called in user c++ code.
//
extern ufcx_form* {name_from_uflfile};

// Helper used to create function space using function name,
// i.e. name of the Python variable.
//
ufcx_function_space* functionspace_{name_from_uflfile}(const char* function_name);
"""

factory = """
// Code for form {factory_name}

{original_coefficient_position_init}
{dofmaps_init}
{finite_elements_init}
{form_integral_offsets_init}
{form_integrals_init}
{form_integral_ids_init}

// Return a list of the coefficient names.
const char** coefficient_name_{factory_name}(void)
{{
{coefficient_name_map}
}}

// Return a list of the constant names.
const char** constant_name_{factory_name}(void)
{{
{constant_name_map}
}}

ufcx_form {factory_name} =
{{
  .signature = {signature},
  .rank = {rank},
  .num_coefficients = {num_coefficients},
  .num_constants = {num_constants},
  .original_coefficient_position = {original_coefficient_position},

  .coefficient_name_map = coefficient_name_{factory_name},
  .constant_name_map = constant_name_{factory_name},

  .finite_elements = {finite_elements},
  .dofmaps = {dofmaps},

  .form_integrals = {form_integrals},
  .form_integral_ids = {form_integral_ids},
  .form_integral_offsets = form_integral_offsets_{factory_name}
}};

// Alias name
ufcx_form* {name_from_uflfile} = &{factory_name};

ufcx_function_space* functionspace_{name_from_uflfile}(const char* function_name)
{{
{functionspace}
}}

// End of code for form {factory_name}
"""

ffcx-0.7.0/ffcx/codegeneration/C/integrals.py

# Copyright (C) 2015-2021 Martin Sandve Alnæs, Michal Habera, Igor Baratta
#
# This file is part of FFCx. (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later

import logging

from ffcx.codegeneration.integral_generator import IntegralGenerator
from ffcx.codegeneration.C import integrals_template as ufcx_integrals
from ffcx.codegeneration.backend import FFCXBackend
from ffcx.codegeneration.C.c_implementation import CFormatter
from ffcx.codegeneration.utils import cdtype_to_numpy, scalar_to_value_type

logger = logging.getLogger("ffcx")


def generator(ir, options):
    """Generate code for an integral."""
    logger.info("Generating code for integral:")
    logger.info(f"--- type: {ir.integral_type}")
    logger.info(f"--- name: {ir.name}")

    factory_name = ir.name

    # Format declaration
    declaration = ufcx_integrals.declaration.format(factory_name=factory_name)

    # Create FFCx C backend
    backend = FFCXBackend(ir, options)

    # Configure kernel generator
    ig = IntegralGenerator(ir, backend)

    # Generate code ast for the tabulate_tensor body
    parts = ig.generate()

    # Format code as string
    CF = CFormatter(options["scalar_type"])
    body = CF.c_format(parts)

    # Generate generic FFCx code snippets and add specific parts
    code = {}

    if len(ir.enabled_coefficients) > 0:
        values = ", ".join("1" if i else "0" for i in ir.enabled_coefficients)
        sizes = len(ir.enabled_coefficients)
        code["enabled_coefficients_init"] = f"bool enabled_coefficients_{ir.name}[{sizes}] = {{{values}}};"
        code["enabled_coefficients"] = f"enabled_coefficients_{ir.name}"
    else:
        code["enabled_coefficients_init"] = ""
        code["enabled_coefficients"] = "NULL"

    code["additional_includes_set"] = set()  # FIXME: Get this out of code[]
    code["tabulate_tensor"] = body

    implementation = ufcx_integrals.factory.format(
        factory_name=factory_name,
        enabled_coefficients=code["enabled_coefficients"],
        enabled_coefficients_init=code["enabled_coefficients_init"],
        tabulate_tensor=code["tabulate_tensor"],
        needs_facet_permutations="true" if ir.needs_facet_permutations else "false",
        scalar_type=options["scalar_type"],
        geom_type=scalar_to_value_type(options["scalar_type"]),
        np_scalar_type=cdtype_to_numpy(options["scalar_type"]),
        coordinate_element=f"&{ir.coordinate_element}")

    return declaration, implementation
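
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of FFCx): the helpers imported above map a C
# scalar type to its real-valued counterpart (used for geometry) and to a
# NumPy-style name (used in the ufcx_integral member name). The mappings
# below are this sketch's assumptions, not FFCx's authoritative tables --
# see ffcx.codegeneration.utils for the real implementations.
def _demo_scalar_type_mapping(scalar_type="double _Complex"):
    # Assumed: the value (geometry) type is the scalar type without _Complex
    value_type = scalar_type.replace(" _Complex", "")

    # Assumed correspondence between C scalar types and NumPy names
    to_numpy = {"float": "float32", "double": "float64",
                "float _Complex": "complex64", "double _Complex": "complex128"}
    return value_type, to_numpy[scalar_type]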

ffcx-0.7.0/ffcx/codegeneration/C/integrals_template.py

# Code generation format strings for UFC (Unified Form-assembly Code)
# This code is released into the public domain.
#
# The FEniCS Project (http://www.fenicsproject.org/) 2018

declaration = """
extern ufcx_integral {factory_name};
"""

factory = """
// Code for integral {factory_name}

void tabulate_tensor_{factory_name}({scalar_type}* restrict A,
                                    const {scalar_type}* restrict w,
                                    const {scalar_type}* restrict c,
                                    const {geom_type}* restrict coordinate_dofs,
                                    const int* restrict entity_local_index,
                                    const uint8_t* restrict quadrature_permutation)
{{
{tabulate_tensor}
}}

{enabled_coefficients_init}

ufcx_integral {factory_name} =
{{
  .enabled_coefficients = {enabled_coefficients},
  .tabulate_tensor_{np_scalar_type} = tabulate_tensor_{factory_name},
  .needs_facet_permutations = {needs_facet_permutations},
  .coordinate_element = {coordinate_element},
}};

// End of code for integral {factory_name}
"""

ffcx-0.7.0/ffcx/codegeneration/__init__.py

import hashlib
import os

# Version of FFCx header files
__author__ = "FEniCS Project"
__license__ = "This code is released into the public domain"
__version__ = "2018.2.0.dev0"

# Get abspath on import, it can in some cases be a relative path w.r.t.
# curdir on startup
_include_path = os.path.dirname(os.path.abspath(__file__))


def get_include_path():
    """Return location of UFC header files."""
    return _include_path


def _compute_signature():
    # Compute signature of ufcx header files
    h = hashlib.sha1()
    with open(os.path.join(get_include_path(), "ufcx.h")) as f:
        h.update(f.read().encode("utf-8"))
    return h.hexdigest()


_signature = _compute_signature()


def get_signature():
    """Return SHA-1 hash of the contents of ufcx.h.

    In this implementation, the value is computed on import.
    """
    return _signature
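
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of FFCx): the signature above is simply the
# SHA-1 of the header file's bytes, so any edit to ufcx.h changes the
# signature and invalidates previously JIT-compiled modules. Demonstrated
# here on an in-memory string instead of ufcx.h.
def _demo_content_signature(text="typedef struct ufcx_form ufcx_form;"):
    import hashlib

    h = hashlib.sha1()
    h.update(text.encode("utf-8"))
    return h.hexdigest()  # stable for identical contents, changes on any edit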

ffcx-0.7.0/ffcx/codegeneration/access.py

# Copyright (C) 2011-2017 Martin Sandve Alnæs
#
# This file is part of FFCx. (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
"""FFCx/UFC specific variable access."""

import logging
import warnings

import ufl
import basix.ufl
from ffcx.element_interface import convert_element
import ffcx.codegeneration.lnodes as L

logger = logging.getLogger("ffcx")


class FFCXBackendAccess(object):
    """FFCx specific cpp formatter class."""

    def __init__(self, ir, symbols, options):
        # Store ir and options
        self.entitytype = ir.entitytype
        self.integral_type = ir.integral_type
        self.symbols = symbols
        self.options = options

        # Lookup table for handler to call when the "get" method (below) is
        # called, depending on the first argument type.
        self.call_lookup = {ufl.coefficient.Coefficient: self.coefficient,
                            ufl.constant.Constant: self.constant,
                            ufl.geometry.Jacobian: self.jacobian,
                            ufl.geometry.CellCoordinate: self.cell_coordinate,
                            ufl.geometry.FacetCoordinate: self.facet_coordinate,
                            ufl.geometry.CellVertices: self.cell_vertices,
                            ufl.geometry.FacetEdgeVectors: self.facet_edge_vectors,
                            ufl.geometry.CellEdgeVectors: self.cell_edge_vectors,
                            ufl.geometry.CellFacetJacobian: self.cell_facet_jacobian,
                            ufl.geometry.ReferenceCellVolume: self.reference_cell_volume,
                            ufl.geometry.ReferenceFacetVolume: self.reference_facet_volume,
                            ufl.geometry.ReferenceCellEdgeVectors: self.reference_cell_edge_vectors,
                            ufl.geometry.ReferenceFacetEdgeVectors: self.reference_facet_edge_vectors,
                            ufl.geometry.ReferenceNormal: self.reference_normal,
                            ufl.geometry.CellOrientation: self._pass,
                            ufl.geometry.FacetOrientation: self.facet_orientation,
                            ufl.geometry.SpatialCoordinate: self.spatial_coordinate}

    def get(self, e, mt, tabledata, num_points):
        # Call appropriate handler, depending on the type of e
        handler = self.call_lookup.get(type(e), False)

        if not handler:
            # Look for parent class types instead
            for k in self.call_lookup.keys():
                if isinstance(e, k):
                    handler = self.call_lookup[k]
                    break

        if handler:
            return handler(e, mt, tabledata, num_points)
        else:
            raise RuntimeError(f"Not handled: {type(e)}")

    def coefficient(self, e, mt, tabledata, num_points):
        ttype = tabledata.ttype
        assert ttype != "zeros"

        num_dofs = tabledata.values.shape[3]
        begin = tabledata.offset
        end = begin + tabledata.block_size * (num_dofs - 1) + 1

        if ttype == "ones" and (end - begin) == 1:
            # f = 1.0 * f_{begin}, just return direct reference to dof
            # array at dof begin (if mt is restricted, begin contains
            # cell offset)
            return self.symbols.coefficient_dof_access(mt.terminal, begin)
        else:
            # Return symbol, see definitions for computation
            return self.symbols.coefficient_value(mt)

    def constant(self, e, mt, tabledata, num_points):
        """Access to a constant is handled trivially, directly through constants symbol."""
        return self.symbols.constant_index_access(mt.terminal, mt.flat_component)

    def spatial_coordinate(self, e, mt, tabledata, num_points):
        if mt.global_derivatives:
            raise RuntimeError("Not expecting global derivatives of SpatialCoordinate.")
        if mt.averaged is not None:
            raise RuntimeError("Not expecting average of SpatialCoordinates.")

        if self.integral_type in ufl.custom_integral_types:
            if mt.local_derivatives:
                raise RuntimeError("FIXME: Jacobian in custom integrals is not implemented.")

            # Access predefined quadrature points table
            x = self.symbols.custom_points_table
            iq = self.symbols.quadrature_loop_index
            gdim, = mt.terminal.ufl_shape
            if gdim == 1:
                index = iq
            else:
                index = iq * gdim + mt.flat_component
            return x[index]
        elif self.integral_type == "expression":
            # Physical coordinates are computed by code generated in
            # definitions
            return self.symbols.x_component(mt)
        else:
            # Physical coordinates are computed by code generated in
            # definitions
            return self.symbols.x_component(mt)

    def cell_coordinate(self, e, mt, tabledata, num_points):
        if mt.global_derivatives:
            raise RuntimeError("Not expecting derivatives of CellCoordinate.")
        if mt.local_derivatives:
            raise RuntimeError("Not expecting derivatives of CellCoordinate.")
        if mt.averaged is not None:
            raise RuntimeError("Not expecting average of CellCoordinate.")

        if self.integral_type == "cell" and not mt.restriction:
            # Access predefined quadrature points table
            X = self.symbols.points_table(num_points)
            tdim, = mt.terminal.ufl_shape
            iq = self.symbols.quadrature_loop_index()
            if num_points == 1:
                index = mt.flat_component
            elif tdim == 1:
                index = iq
            else:
                index = iq * tdim + mt.flat_component
            return X[index]
        else:
            # X should be computed from x or Xf symbolically instead of
            # getting here
            raise RuntimeError("Expecting reference cell coordinate to be symbolically rewritten.")

    def facet_coordinate(self, e, mt, tabledata, num_points):
        if mt.global_derivatives:
            raise RuntimeError("Not expecting derivatives of FacetCoordinate.")
        if mt.local_derivatives:
            raise RuntimeError("Not expecting derivatives of FacetCoordinate.")
        if mt.averaged is not None:
            raise RuntimeError("Not expecting average of FacetCoordinate.")
        if mt.restriction:
            raise RuntimeError("Not expecting restriction of FacetCoordinate.")

        if self.integral_type in ("interior_facet", "exterior_facet"):
            tdim, = mt.terminal.ufl_shape
            if tdim == 0:
                raise RuntimeError("Vertices have no facet coordinates.")
            elif tdim == 1:
                warnings.warn(
                    "Vertex coordinate is always 0, should get rid of this in ufl geometry lowering."
                )
                return L.LiteralFloat(0.0)
            Xf = self.symbols.points_table(num_points)
            iq = self.symbols.quadrature_loop_index()
            assert 0 <= mt.flat_component < (tdim - 1)
            if num_points == 1:
                index = mt.flat_component
            elif tdim == 2:
                index = iq
            else:
                index = iq * (tdim - 1) + mt.flat_component
            return Xf[index]
        else:
            # Xf should be computed from X or x symbolically instead of
            # getting here
            raise RuntimeError("Expecting reference facet coordinate to be symbolically rewritten.")

    def jacobian(self, e, mt, tabledata, num_points):
        if mt.averaged is not None:
            raise RuntimeError("Not expecting average of Jacobian.")
        return self.symbols.J_component(mt)

    def reference_cell_volume(self, e, mt, tabledata, access):
        cellname = ufl.domain.extract_unique_domain(mt.terminal).ufl_cell().cellname()
        if cellname in ("interval", "triangle", "tetrahedron", "quadrilateral", "hexahedron"):
            return L.Symbol(f"{cellname}_reference_cell_volume", dtype=L.DataType.REAL)
        else:
            raise RuntimeError(f"Unhandled cell types {cellname}.")

    def reference_facet_volume(self, e, mt, tabledata, access):
        cellname = ufl.domain.extract_unique_domain(mt.terminal).ufl_cell().cellname()
        if cellname in ("interval", "triangle", "tetrahedron", "quadrilateral", "hexahedron"):
            return L.Symbol(f"{cellname}_reference_facet_volume", dtype=L.DataType.REAL)
        else:
            raise RuntimeError(f"Unhandled cell types {cellname}.")

    def reference_normal(self, e, mt, tabledata, access):
        cellname = ufl.domain.extract_unique_domain(mt.terminal).ufl_cell().cellname()
        if cellname in ("interval", "triangle", "tetrahedron", "quadrilateral", "hexahedron"):
            table = L.Symbol(f"{cellname}_reference_facet_normals", dtype=L.DataType.REAL)
            facet = self.symbols.entity("facet", mt.restriction)
            return table[facet][mt.component[0]]
        else:
            raise RuntimeError(f"Unhandled cell types {cellname}.")

    def cell_facet_jacobian(self, e, mt, tabledata, num_points):
        cellname = ufl.domain.extract_unique_domain(mt.terminal).ufl_cell().cellname()
        if cellname in ("triangle", "tetrahedron", "quadrilateral", "hexahedron"):
            table = L.Symbol(f"{cellname}_reference_facet_jacobian", dtype=L.DataType.REAL)
            facet = self.symbols.entity("facet", mt.restriction)
            return table[facet][mt.component[0]][mt.component[1]]
        elif cellname == "interval":
            raise RuntimeError("The reference facet jacobian doesn't make sense for interval cell.")
        else:
            raise RuntimeError(f"Unhandled cell types {cellname}.")

    def reference_cell_edge_vectors(self, e, mt, tabledata, num_points):
        cellname = ufl.domain.extract_unique_domain(mt.terminal).ufl_cell().cellname()
        if cellname in ("triangle", "tetrahedron", "quadrilateral", "hexahedron"):
            table = L.Symbol(f"{cellname}_reference_edge_vectors", dtype=L.DataType.REAL)
            return table[mt.component[0]][mt.component[1]]
        elif cellname == "interval":
            raise RuntimeError("The reference cell edge vectors doesn't make sense for interval cell.")
        else:
            raise RuntimeError(f"Unhandled cell types {cellname}.")

    def reference_facet_edge_vectors(self, e, mt, tabledata, num_points):
        cellname = ufl.domain.extract_unique_domain(mt.terminal).ufl_cell().cellname()
        if cellname in ("tetrahedron", "hexahedron"):
            table = L.Symbol(f"{cellname}_reference_edge_vectors", dtype=L.DataType.REAL)
            facet = self.symbols.entity("facet", mt.restriction)
            return table[facet][mt.component[0]][mt.component[1]]
        elif cellname in ("interval", "triangle", "quadrilateral"):
            raise RuntimeError(
                "The reference cell facet edge vectors doesn't make sense for interval or triangle cell."
            )
        else:
            raise RuntimeError(f"Unhandled cell types {cellname}.")

    def facet_orientation(self, e, mt, tabledata, num_points):
        cellname = ufl.domain.extract_unique_domain(mt.terminal).ufl_cell().cellname()
        if cellname not in ("interval", "triangle", "tetrahedron"):
            raise RuntimeError(f"Unhandled cell types {cellname}.")

        table = L.Symbol(f"{cellname}_facet_orientations", dtype=L.DataType.INT)
        facet = self.symbols.entity("facet", mt.restriction)
        return table[facet]

    def cell_vertices(self, e, mt, tabledata, num_points):
        # Get properties of domain
        domain = ufl.domain.extract_unique_domain(mt.terminal)
        gdim = domain.geometric_dimension()
        coordinate_element = convert_element(domain.ufl_coordinate_element())

        # Get dimension and dofmap of scalar element
        assert isinstance(coordinate_element, basix.ufl._BlockedElement)
        assert coordinate_element.value_shape() == (gdim, )
        ufl_scalar_element, = set(coordinate_element.sub_elements())
        scalar_element = convert_element(ufl_scalar_element)
        assert scalar_element.value_size == 1 and scalar_element.block_size == 1

        vertex_scalar_dofs = scalar_element.entity_dofs[0]
        num_scalar_dofs = scalar_element.dim

        # Get dof and component
        dof, = vertex_scalar_dofs[mt.component[0]]
        component = mt.component[1]

        expr = self.symbols.domain_dof_access(dof, component, gdim, num_scalar_dofs, mt.restriction)
        return expr

    def cell_edge_vectors(self, e, mt, tabledata, num_points):
        # Get properties of domain
        domain = ufl.domain.extract_unique_domain(mt.terminal)
        cellname = domain.ufl_cell().cellname()
        gdim = domain.geometric_dimension()
        coordinate_element = convert_element(domain.ufl_coordinate_element())

        if cellname in ("triangle", "tetrahedron", "quadrilateral", "hexahedron"):
            pass
        elif cellname == "interval":
            raise RuntimeError("The physical cell edge vectors doesn't make sense for interval cell.")
        else:
            raise RuntimeError(f"Unhandled cell types {cellname}.")

        # Get dimension and dofmap of scalar element
        assert isinstance(coordinate_element, basix.ufl._BlockedElement)
        assert coordinate_element.value_shape() == (gdim, )
        ufl_scalar_element, = set(coordinate_element.sub_elements())
        scalar_element = convert_element(ufl_scalar_element)
        assert scalar_element.value_size == 1 and scalar_element.block_size == 1

        vertex_scalar_dofs = scalar_element.entity_dofs[0]
        num_scalar_dofs = scalar_element.dim

        # Get edge vertices
        edge = mt.component[0]
        vertex0, vertex1 = scalar_element.reference_topology[1][edge]

        # Get dofs and component
        dof0, = vertex_scalar_dofs[vertex0]
        dof1, = vertex_scalar_dofs[vertex1]
        component = mt.component[1]

        return self.symbols.domain_dof_access(
            dof0, component, gdim, num_scalar_dofs, mt.restriction
        ) - self.symbols.domain_dof_access(
            dof1, component, gdim, num_scalar_dofs, mt.restriction
        )

    def facet_edge_vectors(self, e, mt, tabledata, num_points):
        # Get properties of domain
        domain = ufl.domain.extract_unique_domain(mt.terminal)
        cellname = domain.ufl_cell().cellname()
        gdim = domain.geometric_dimension()
        coordinate_element = convert_element(domain.ufl_coordinate_element())

        if cellname in ("tetrahedron", "hexahedron"):
            pass
        elif cellname in ("interval", "triangle", "quadrilateral"):
            raise RuntimeError(
                f"The physical facet edge vectors doesn't make sense for {cellname} cell.")
        else:
            raise RuntimeError(f"Unhandled cell types {cellname}.")

        # Get dimension and dofmap of scalar element
        assert isinstance(coordinate_element, basix.ufl._BlockedElement)
        assert coordinate_element.value_shape() == (gdim, )
        ufl_scalar_element, = set(coordinate_element.sub_elements())
        scalar_element = convert_element(ufl_scalar_element)
        assert scalar_element.value_size == 1 and scalar_element.block_size == 1
        num_scalar_dofs = scalar_element.dim

        # Get edge vertices
        facet = self.symbols.entity("facet", mt.restriction)
        facet_edge = mt.component[0]
        facet_edge_vertices = L.Symbol(f"{cellname}_facet_edge_vertices", dtype=L.DataType.INT)
        vertex0 = facet_edge_vertices[facet][facet_edge][0]
        vertex1 = facet_edge_vertices[facet][facet_edge][1]

        # Get dofs and component
        component = mt.component[1]
        assert coordinate_element.degree() == 1, "Assuming degree 1 element"
        dof0 = vertex0
        dof1 = vertex1
        expr = (
            self.symbols.domain_dof_access(dof0, component, gdim, num_scalar_dofs, mt.restriction)
            - self.symbols.domain_dof_access(dof1, component, gdim, num_scalar_dofs, mt.restriction))

        return expr

    def _pass(self, *args, **kwargs):
        """Return one."""
        return 1
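
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of FFCx): the `get` method above dispatches on
# the type of a UFL terminal through a dict, falling back to an isinstance
# scan so that subclasses of a registered type are also handled. The toy
# class hierarchy below is made up.
def _demo_type_dispatch():
    class Terminal:
        pass

    class Coefficient(Terminal):
        pass

    class SpecialCoefficient(Coefficient):
        pass

    call_lookup = {Coefficient: lambda t: "coefficient"}

    def get(t):
        handler = call_lookup.get(type(t), False)
        if not handler:
            # Exact type not registered: accept a registered parent class
            for k in call_lookup.keys():
                if isinstance(t, k):
                    handler = call_lookup[k]
                    break
        if handler:
            return handler(t)
        raise RuntimeError(f"Not handled: {type(t)}")

    assert get(Coefficient()) == "coefficient"
    assert get(SpecialCoefficient()) == "coefficient"  # via the fallback scan
    return get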
""" import logging import typing from ffcx.codegeneration.C.dofmap import generator as dofmap_generator from ffcx.codegeneration.C.expressions import generator as expression_generator from ffcx.codegeneration.C.finite_element import \ generator as finite_element_generator from ffcx.codegeneration.C.form import generator as form_generator from ffcx.codegeneration.C.integrals import generator as integral_generator from ffcx.codegeneration.C.file import generator as file_generator logger = logging.getLogger("ffcx") class CodeBlocks(typing.NamedTuple): """ Storage of code blocks of the form (declaration, implementation). Blocks for elements, dofmaps, integrals, forms and expressions, and start and end of file output """ file_pre: typing.List[typing.Tuple[str, str]] elements: typing.List[typing.Tuple[str, str]] dofmaps: typing.List[typing.Tuple[str, str]] integrals: typing.List[typing.Tuple[str, str]] forms: typing.List[typing.Tuple[str, str]] expressions: typing.List[typing.Tuple[str, str]] file_post: typing.List[typing.Tuple[str, str]] def generate_code(ir, options) -> CodeBlocks: """Generate code blocks from intermediate representation.""" logger.info(79 * "*") logger.info("Compiler stage 3: Generating code") logger.info(79 * "*") # Generate code for finite_elements code_finite_elements = [finite_element_generator(element_ir, options) for element_ir in ir.elements] code_dofmaps = [dofmap_generator(dofmap_ir, options) for dofmap_ir in ir.dofmaps] code_integrals = [integral_generator(integral_ir, options) for integral_ir in ir.integrals] code_forms = [form_generator(form_ir, options) for form_ir in ir.forms] code_expressions = [expression_generator(expression_ir, options) for expression_ir in ir.expressions] code_file_pre, code_file_post = file_generator(options) return CodeBlocks(file_pre=[code_file_pre], elements=code_finite_elements, dofmaps=code_dofmaps, integrals=code_integrals, forms=code_forms, expressions=code_expressions, file_post=[code_file_post]) ffcx-0.7.0/ffcx/codegeneration/definitions.py000066400000000000000000000172111450721277100212550ustar00rootroot00000000000000# Copyright (C) 2011-2017 Martin Sandve Alnæs # # This file is part of FFCx. (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """FFCx/UFC specific variable definitions.""" import logging import ufl from ffcx.element_interface import convert_element import ffcx.codegeneration.lnodes as L logger = logging.getLogger("ffcx") class FFCXBackendDefinitions(object): """FFCx specific code definitions.""" def __init__(self, ir, symbols, options): # Store ir and options self.integral_type = ir.integral_type self.entitytype = ir.entitytype self.symbols = symbols self.options = options self.ir = ir # Lookup table for handler to call when the "get" method (below) is # called, depending on the first argument type. 

ffcx-0.7.0/ffcx/codegeneration/definitions.py

# Copyright (C) 2011-2017 Martin Sandve Alnæs
#
# This file is part of FFCx. (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
"""FFCx/UFC specific variable definitions."""

import logging

import ufl
from ffcx.element_interface import convert_element
import ffcx.codegeneration.lnodes as L

logger = logging.getLogger("ffcx")


class FFCXBackendDefinitions(object):
    """FFCx specific code definitions."""

    def __init__(self, ir, symbols, options):
        # Store ir and options
        self.integral_type = ir.integral_type
        self.entitytype = ir.entitytype
        self.symbols = symbols
        self.options = options
        self.ir = ir

        # Lookup table for handler to call when the "get" method (below) is
        # called, depending on the first argument type.
        self.call_lookup = {ufl.coefficient.Coefficient: self.coefficient,
                            ufl.constant.Constant: self.constant,
                            ufl.geometry.Jacobian: self.jacobian,
                            ufl.geometry.CellVertices: self._expect_physical_coords,
                            ufl.geometry.FacetEdgeVectors: self._expect_physical_coords,
                            ufl.geometry.CellEdgeVectors: self._expect_physical_coords,
                            ufl.geometry.CellFacetJacobian: self._expect_table,
                            ufl.geometry.ReferenceCellVolume: self._expect_table,
                            ufl.geometry.ReferenceFacetVolume: self._expect_table,
                            ufl.geometry.ReferenceCellEdgeVectors: self._expect_table,
                            ufl.geometry.ReferenceFacetEdgeVectors: self._expect_table,
                            ufl.geometry.ReferenceNormal: self._expect_table,
                            ufl.geometry.CellOrientation: self._pass,
                            ufl.geometry.FacetOrientation: self._expect_table,
                            ufl.geometry.SpatialCoordinate: self.spatial_coordinate}

    def get(self, t, mt, tabledata, quadrature_rule, access):
        # Call appropriate handler, depending on the type of t
        ttype = type(t)
        handler = self.call_lookup.get(ttype, False)

        if not handler:
            # Look for parent class types instead
            for k in self.call_lookup.keys():
                if isinstance(t, k):
                    handler = self.call_lookup[k]
                    break

        if handler:
            return handler(t, mt, tabledata, quadrature_rule, access)
        else:
            raise RuntimeError(f"Not handled: {ttype}")

    def coefficient(self, t, mt, tabledata, quadrature_rule, access):
        """Return definition code for coefficients."""
        ttype = tabledata.ttype
        num_dofs = tabledata.values.shape[3]
        bs = tabledata.block_size
        begin = tabledata.offset
        end = begin + bs * (num_dofs - 1) + 1

        if ttype == "zeros":
            logging.debug("Not expecting zero coefficients to get this far.")
            return [], []

        # For a constant coefficient we reference the dofs directly, so no definition needed
        if ttype == "ones" and end - begin == 1:
            return [], []

        assert begin < end

        # Get access to element table
        FE = self.symbols.element_table(tabledata, self.entitytype, mt.restriction)
        ic = self.symbols.coefficient_dof_sum_index

        code = []
        pre_code = []

        if bs > 1 and not tabledata.is_piecewise:
            # For bs > 1, the coefficient access has a stride of bs, e.g.: XYZXYZXYZ.
            # When memory access patterns are non-sequential, the number of cache misses increases.
            # In turn, it results in noticeably reduced performance.
            # In this case, we create temp arrays outside the quadrature loop to store the
            # coefficients and get a sequential access pattern.
            dof_access, dof_access_map = self.symbols.coefficient_dof_access_blocked(mt.terminal, ic, bs, begin)

            # If a map is necessary from stride 1 to bs, the code must be added before the quadrature loop.
            if dof_access_map:
                pre_code += [L.ArrayDecl(dof_access.array, sizes=num_dofs)]
                pre_body = [L.Assign(dof_access, dof_access_map)]
                pre_code += [L.ForRange(ic, 0, num_dofs, pre_body)]
        else:
            dof_access = self.symbols.coefficient_dof_access(mt.terminal, ic * bs + begin)

        body = [L.AssignAdd(access, dof_access * FE[ic])]
        code += [L.VariableDecl(access, 0.0)]
        code += [L.ForRange(ic, 0, num_dofs, body)]

        return pre_code, code

    def constant(self, t, mt, tabledata, quadrature_rule, access):
        # Constants are not defined within the kernel.
        # No definition is needed because access to them is directly
        # via symbol c[], i.e. as passed into the kernel.
        return [], []

    def _define_coordinate_dofs_lincomb(self, e, mt, tabledata, quadrature_rule, access):
        """Define x or J as a linear combination of coordinate dofs with given table data."""
        # Get properties of domain
        domain = ufl.domain.extract_unique_domain(mt.terminal)
        coordinate_element = domain.ufl_coordinate_element()
        num_scalar_dofs = convert_element(coordinate_element).sub_element.dim

        num_dofs = tabledata.values.shape[3]
        begin = tabledata.offset

        assert num_scalar_dofs == num_dofs

        # Find table name
        ttype = tabledata.ttype

        assert ttype != "zeros"
        assert ttype != "ones"

        # Get access to element table
        FE = self.symbols.element_table(tabledata, self.entitytype, mt.restriction)
        ic = self.symbols.coefficient_dof_sum_index
        dof_access = self.symbols.domain_dof_access(ic, begin, 3, num_scalar_dofs, mt.restriction)

        code = []
        body = [L.AssignAdd(access, dof_access * FE[ic])]
        code += [L.VariableDecl(access, 0.0)]
        code += [L.ForRange(ic, 0, num_scalar_dofs, body)]

        return [], code

    def spatial_coordinate(self, e, mt, tabledata, quadrature_rule, access):
        """Return definition code for the physical spatial coordinates.

        If physical coordinates are given:
          No definition needed.

        If reference coordinates are given:
          x = sum_k xdof_k xphi_k(X)

        If reference facet coordinates are given:
          x = sum_k xdof_k xphi_k(Xf)
        """
        if self.integral_type in ufl.custom_integral_types:
            # FIXME: Jacobian may need adjustment for custom_integral_types
            if mt.local_derivatives:
                logging.exception("FIXME: Jacobian in custom integrals is not implemented.")
            return [], []
        else:
            return self._define_coordinate_dofs_lincomb(e, mt, tabledata, quadrature_rule, access)

    def jacobian(self, e, mt, tabledata, quadrature_rule, access):
        """Return definition code for the Jacobian of x(X)."""
        return self._define_coordinate_dofs_lincomb(e, mt, tabledata, quadrature_rule, access)

    def _expect_table(self, e, mt, tabledata, quadrature_rule, access):
        """Return quantities referring to constant tables defined in the generated code."""
        # TODO: Inject const static table here instead?
        return [], []

    def _expect_physical_coords(self, e, mt, tabledata, quadrature_rule, access):
        """Return quantities referring to coordinate_dofs."""
        # TODO: Generate more efficient inline code for Max/MinCell/FacetEdgeLength
        # and CellDiameter here rather than lowering these quantities?
        return [], []

    def _pass(self, *args, **kwargs):
        """Return nothing."""
        return [], []
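
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of FFCx): the blocked-coefficient path in
# `coefficient` above generates C that gathers strided dofs (layout
# X Y Z X Y Z ...) into one contiguous temporary per component before the
# quadrature loop, trading a small copy for sequential reads. The pure-Python
# model below uses made-up sizes.
def _demo_blocked_gather(bs=3, num_dofs=4, begin=0):
    # Interleaved storage: dof i of component c sits at w[i * bs + begin + c]
    w = list(range(bs * num_dofs))

    # One contiguous temporary per component, filled before the hot loop
    temps = [[w[i * bs + begin + c] for i in range(num_dofs)] for c in range(bs)]

    # Inside the quadrature loop each component is now a unit-stride read
    assert temps[1] == [1, 4, 7, 10]
    return temps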
""" # Generate varying partition body = self.generate_varying_partition() body = L.commented_code_list( body, f"Points loop body setup quadrature loop {self.quadrature_rule.id()}") # Generate dofblock parts, some of this # will be placed before or after quadloop preparts, quadparts = \ self.generate_dofblock_partition() body += quadparts # Wrap body in loop or scope if not body: # Could happen for integral with everything zero and optimized away quadparts = [] else: iq = self.backend.symbols.quadrature_loop_index num_points = self.quadrature_rule.points.shape[0] quadparts = [L.ForRange(iq, 0, num_points, body=body)] return preparts, quadparts def generate_varying_partition(self): """Generate factors of blocks which are not cellwise constant.""" # Get annotated graph of factorisation F = self.ir.integrand[self.quadrature_rule]["factorization"] arraysymbol = L.Symbol(f"sv_{self.quadrature_rule.id()}", dtype=L.DataType.SCALAR) parts = self.generate_partition(arraysymbol, F, "varying") parts = L.commented_code_list( parts, f"Unstructured varying computations for quadrature rule {self.quadrature_rule.id()}") return parts def generate_piecewise_partition(self): """Generate factors of blocks which are constant (i.e. do not depend on quadrature points).""" # Get annotated graph of factorisation F = self.ir.integrand[self.quadrature_rule]["factorization"] arraysymbol = L.Symbol("sp", dtype=L.DataType.SCALAR) parts = self.generate_partition(arraysymbol, F, "piecewise") parts = L.commented_code_list(parts, "Unstructured piecewise computations") return parts def generate_dofblock_partition(self): """Generate assignments of blocks multiplied with their factors into final tensor A.""" block_contributions = self.ir.integrand[self.quadrature_rule]["block_contributions"] preparts = [] quadparts = [] blocks = [(blockmap, blockdata) for blockmap, contributions in sorted(block_contributions.items()) for blockdata in contributions] for blockmap, blockdata in blocks: # Define code for block depending on mode block_preparts, block_quadparts = \ self.generate_block_parts(blockmap, blockdata) # Add definitions preparts.extend(block_preparts) # Add computations quadparts.extend(block_quadparts) return preparts, quadparts def generate_block_parts(self, blockmap, blockdata): """Generate and return code parts for a given block.""" # The parts to return preparts = [] quadparts = [] block_rank = len(blockmap) blockdims = tuple(len(dofmap) for dofmap in blockmap) ttypes = blockdata.ttypes if "zeros" in ttypes: raise RuntimeError("Not expecting zero arguments to be left in dofblock generation.") arg_indices = tuple(self.backend.symbols.argument_loop_index(i) for i in range(block_rank)) F = self.ir.integrand[self.quadrature_rule]["factorization"] assert not blockdata.transposed, "Not handled yet" components = ufl.product(self.ir.expression_shape) num_points = self.quadrature_rule.points.shape[0] A_shape = [num_points, components] + self.ir.tensor_shape A = self.backend.symbols.element_tensor iq = self.backend.symbols.quadrature_loop_index # Check if DOFs in dofrange are equally spaced. 
        expand_loop = False
        for i, bm in enumerate(blockmap):
            for a, b in zip(bm[1:-1], bm[2:]):
                if b - a != bm[1] - bm[0]:
                    expand_loop = True
                    break
            else:
                continue
            break

        if expand_loop:
            # If DOFs in dofrange are not equally spaced, then expand out the for loop
            for A_indices, B_indices in zip(product(*blockmap),
                                            product(*[range(len(b)) for b in blockmap])):
                B_indices = tuple([iq] + list(B_indices))
                A_indices = tuple([iq] + list(A_indices))
                for fi_ci in blockdata.factor_indices_comp_indices:
                    f = self.get_var(F.nodes[fi_ci[0]]["expression"])
                    arg_factors = self.get_arg_factors(blockdata, block_rank, B_indices)
                    Brhs = L.float_product([f] + arg_factors)
                    multi_index = L.MultiIndex([A_indices[0], fi_ci[1]] + list(A_indices[1:]), A_shape)
                    quadparts.append(L.AssignAdd(A[multi_index], Brhs))
        else:
            # Prepend dimensions of dofmap block with free index
            # for quadrature points and expression components
            B_indices = tuple([iq] + list(arg_indices))

            # Fetch code to access modified arguments
            # An access to FE table data
            arg_factors = self.get_arg_factors(blockdata, block_rank, B_indices)

            # TODO: handle non-contiguous dof ranges
            A_indices = []
            for bm, index in zip(blockmap, arg_indices):
                # TODO: switch order here? (optionally)
                offset = bm[0]
                if len(bm) == 1:
                    A_indices.append(index + offset)
                else:
                    block_size = bm[1] - bm[0]
                    A_indices.append(block_size * index + offset)
            A_indices = tuple([iq] + A_indices)

            # Multiply collected factors
            # For each component of the factor expression
            # add result inside quadloop
            body = []

            for fi_ci in blockdata.factor_indices_comp_indices:
                f = self.get_var(F.nodes[fi_ci[0]]["expression"])
                Brhs = L.float_product([f] + arg_factors)
                indices = [A_indices[0], fi_ci[1]] + list(A_indices[1:])
                multi_index = L.MultiIndex(indices, A_shape)
                body.append(L.AssignAdd(A[multi_index], Brhs))

            for i in reversed(range(block_rank)):
                body = L.ForRange(B_indices[i + 1], 0, blockdims[i], body=body)
            quadparts += [body]

        return preparts, quadparts

    def get_arg_factors(self, blockdata, block_rank, indices):
        """Get argument factors (i.e. blocks).

        Parameters
        ----------
        blockdata
        block_rank
        indices
            Indices used to index element tables

        """
        arg_factors = []
        for i in range(block_rank):
            mad = blockdata.ma_data[i]
            td = mad.tabledata
            mt = self.ir.integrand[self.quadrature_rule]["modified_arguments"][mad.ma_index]

            table = self.backend.symbols.element_table(td, self.ir.entitytype, mt.restriction)

            assert td.ttype != "zeros"

            if td.ttype == "ones":
                arg_factor = L.LiteralFloat(1.0)
            else:
                arg_factor = table[indices[i + 1]]
            arg_factors.append(arg_factor)
        return arg_factors

    def new_temp_symbol(self, basename):
        """Create a new code symbol named basename + running counter."""
        name = "%s%d" % (basename, self.symbol_counters[basename])
        self.symbol_counters[basename] += 1
        return L.Symbol(name, dtype=L.DataType.SCALAR)

    def get_var(self, v):
        if v._ufl_is_literal_:
            return L.ufl_to_lnodes(v)
        f = self.scope.get(v)
        return f

    def generate_partition(self, symbol, F, mode):
        """Generate computations of factors of blocks."""
        definitions = []
        pre_definitions = dict()
        intermediates = []

        use_symbol_array = True

        for i, attr in F.nodes.items():
            if attr['status'] != mode:
                continue
            v = attr['expression']
            mt = attr.get('mt')

            if v._ufl_is_literal_:
                vaccess = L.ufl_to_lnodes(v)
            elif mt is not None:
                # All finite element based terminals have table data, as well
                # as some, but not all, of the symbolic geometric terminals
                tabledata = attr.get('tr')

                # Backend specific modified terminal translation
                vaccess = self.backend.access.get(mt.terminal, mt, tabledata, 0)
                predef, vdef = self.backend.definitions.get(mt.terminal, mt, tabledata, 0, vaccess)
                if predef:
                    pre_definitions[str(predef[0].symbol.name)] = predef

                # Store definitions of terminals in list
                assert isinstance(vdef, list)
                definitions.extend(vdef)
            else:
                # Get previously visited operands
                vops = [self.get_var(op) for op in v.ufl_operands]

                # get parent operand
                pid = F.in_edges[i][0] if F.in_edges[i] else -1
                if pid and pid > i:
                    parent_exp = F.nodes.get(pid)['expression']
                else:
                    parent_exp = None

                # Mapping UFL operator to target language
                self._ufl_names.add(v._ufl_handler_name_)
                vexpr = L.ufl_to_lnodes(v, *vops)

                # Create a new intermediate for each subexpression
                # except boolean conditions and its childs
                if isinstance(parent_exp, ufl.classes.Condition):
                    # Skip intermediates for 'x' and 'y' in x < y


ffcx-0.7.0/ffcx/codegeneration/integral_generator.py

        if len(blockdata.factor_indices_comp_indices) > 1:
            raise RuntimeError("Code generation for non-scalar integrals unsupported")

        # We have scalar integrand here, take just the factor index
        factor_index = blockdata.factor_indices_comp_indices[0][0]

        # Get factor expression
        F = self.ir.integrand[quadrature_rule]["factorization"]

        v = F.nodes[factor_index]['expression']
        f = self.get_var(quadrature_rule, v)

        # Quadrature weight was removed in representation, add it back now
        if self.ir.integral_type in ufl.custom_integral_types:
            weights = self.backend.symbols.custom_weights_table
            weight = weights[iq]
        else:
            weights = self.backend.symbols.weights_table(quadrature_rule)
            weight = weights[iq]

        # Define fw = f * weight
        fw_rhs = L.float_product([f, weight])
        if not isinstance(fw_rhs, L.Product):
            fw = fw_rhs
        else:
            # Define and cache scalar temp variable
            key = (quadrature_rule, factor_index, blockdata.all_factors_piecewise)
self.get_temp_symbol("fw", key) if not defined: quadparts.append(L.VariableDecl(fw, fw_rhs)) assert not blockdata.transposed, "Not handled yet" # Fetch code to access modified arguments arg_factors = self.get_arg_factors(blockdata, block_rank, quadrature_rule, iq, B_indices) B_rhs = L.float_product([fw] + arg_factors) A_indices = [] for i in range(block_rank): offset = blockdata.ma_data[i].tabledata.offset index = arg_indices[i] if len(blockmap[i]) == 1: A_indices.append(index + offset) else: block_size = blockdata.ma_data[i].tabledata.block_size A_indices.append(block_size * index + offset) rhs_expressions[tuple(A_indices)].append(B_rhs) # List of statements to keep in the inner loop keep = collections.defaultdict(list) # List of temporary array declarations pre_loop: List[LNode] = [] # List of loop invariant expressions to hoist hoist: List[BinOp] = [] for indices in rhs_expressions: hoist_rhs = collections.defaultdict(list) # Hoist loop invariant code and group array access (each # table should only be read one time in the inner loop) if block_rank == 2: ind = B_indices[-1] for rhs in rhs_expressions[indices]: if len(rhs.args) <= 2: keep[indices].append(rhs) else: varying = next((x for x in rhs.args if hasattr(x, 'indices') and (ind in x.indices)), None) if varying: invariant = [x for x in rhs.args if x is not varying] hoist_rhs[varying].append(invariant) else: keep[indices].append(rhs) # Perform algebraic manipulations to reduce number of # floating point operations (factorize expressions by # grouping) for statement in hoist_rhs: sum = L.Sum([L.float_product(rhs) for rhs in hoist_rhs[statement]]) lhs = None for h in hoist: if h.rhs == sum: lhs = h.lhs break if lhs: keep[indices].append(L.float_product([statement, lhs])) else: t = self.new_temp_symbol("t") pre_loop.append(L.ArrayDecl(t, sizes=blockdims[0])) keep[indices].append(L.float_product([statement, t[B_indices[0]]])) hoist.append(L.Assign(t[B_indices[i - 1]], sum)) else: keep[indices] = rhs_expressions[indices] hoist_code: List[LNode] = [L.ForRange(B_indices[0], 0, blockdims[0], body=hoist)] if hoist else [] body: List[LNode] = [] A = self.backend.symbols.element_tensor A_shape = self.ir.tensor_shape for indices in keep: multi_index = L.MultiIndex(list(indices), A_shape) body.append(L.AssignAdd(A[multi_index], L.Sum(keep[indices]))) for i in reversed(range(block_rank)): body = [L.ForRange(B_indices[i], 0, blockdims[i], body=body)] quadparts += pre_loop quadparts += hoist_code quadparts += body return preparts, quadparts def fuse_loops(self, definitions): """Merge a sequence of loops with the same iteration space into a single loop. Loop fusion improves data locality, cache reuse and decreases the loop control overhead. NOTE: Loop fusion might increase the pressure on register allocation. Ideally, we should define a cost function to determine how many loops should fuse at a time. """ loops = collections.defaultdict(list) pre_loop = [] for access, definition in definitions.items(): for d in definition: if isinstance(d, L.ForRange): loops[(d.index, d.begin, d.end)] += [d.body] else: pre_loop += [d] fused = [] for info, body in loops.items(): index, begin, end = info fused += [L.ForRange(index, begin, end, body)] code = [] code += pre_loop code += fused return code ffcx-0.7.0/ffcx/codegeneration/jit.py000066400000000000000000000313421450721277100175310ustar00rootroot00000000000000# Copyright (C) 2004-2019 Garth N. 

ffcx-0.7.0/ffcx/codegeneration/jit.py

# Copyright (C) 2004-2019 Garth N. Wells
#
# This file is part of FFCx. (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later

import importlib
import io
import logging
import os
import re
import sysconfig
import tempfile
import time
from contextlib import redirect_stdout
from pathlib import Path

import cffi

import ffcx
import ffcx.naming

logger = logging.getLogger("ffcx")
root_logger = logging.getLogger()

# Get declarations directly from ufcx.h
file_dir = os.path.dirname(os.path.abspath(__file__))
with open(file_dir + "/ufcx.h", "r") as f:
    ufcx_h = ''.join(f.readlines())

header = ufcx_h.split("<HEADER_DECL>")[1].split("</HEADER_DECL>")[0].strip(" /\n")
header = header.replace("{", "{{").replace("}", "}}")
UFC_HEADER_DECL = header + "\n"

UFC_ELEMENT_DECL = '\n'.join(re.findall('typedef struct ufcx_finite_element.*?ufcx_finite_element;', ufcx_h, re.DOTALL))
UFC_DOFMAP_DECL = '\n'.join(re.findall('typedef struct ufcx_dofmap.*?ufcx_dofmap;', ufcx_h, re.DOTALL))
UFC_FORM_DECL = '\n'.join(re.findall('typedef struct ufcx_form.*?ufcx_form;', ufcx_h, re.DOTALL))

UFC_INTEGRAL_DECL = '\n'.join(re.findall(r'typedef void ?\(ufcx_tabulate_tensor_float32\).*?\);', ufcx_h, re.DOTALL))
UFC_INTEGRAL_DECL += '\n'.join(re.findall(r'typedef void ?\(ufcx_tabulate_tensor_float64\).*?\);', ufcx_h, re.DOTALL))
UFC_INTEGRAL_DECL += '\n'.join(re.findall(r'typedef void ?\(ufcx_tabulate_tensor_complex64\).*?\);', ufcx_h, re.DOTALL))
UFC_INTEGRAL_DECL += '\n'.join(re.findall(r'typedef void ?\(ufcx_tabulate_tensor_complex128\).*?\);', ufcx_h, re.DOTALL))
UFC_INTEGRAL_DECL += '\n'.join(re.findall(r'typedef void ?\(ufcx_tabulate_tensor_longdouble\).*?\);', ufcx_h, re.DOTALL))
UFC_INTEGRAL_DECL += '\n'.join(re.findall('typedef struct ufcx_integral.*?ufcx_integral;', ufcx_h, re.DOTALL))

UFC_EXPRESSION_DECL = '\n'.join(re.findall('typedef struct ufcx_expression.*?ufcx_expression;', ufcx_h, re.DOTALL))


def _compute_option_signature(options):
    """Return options signature (some options should not affect signature)."""
    return str(sorted(options.items()))


def get_cached_module(module_name, object_names, cache_dir, timeout):
    """Look for an existing C file and wait for compilation, or if it does not exist, create it."""
    cache_dir = Path(cache_dir)
    c_filename = cache_dir.joinpath(module_name).with_suffix(".c")
    ready_name = c_filename.with_suffix(".c.cached")

    # Ensure cache dir exists
    cache_dir.mkdir(exist_ok=True, parents=True)

    try:
        # Create C file with exclusive access
        open(c_filename, "x")
        return None, None
    except FileExistsError:
        logger.info("Cached C file already exists: " + str(c_filename))
        finder = importlib.machinery.FileFinder(
            str(cache_dir),
            (importlib.machinery.ExtensionFileLoader, importlib.machinery.EXTENSION_SUFFIXES))
        finder.invalidate_caches()

        # Now, wait for ready
        for i in range(timeout):
            if os.path.exists(ready_name):
                spec = finder.find_spec(module_name)
                if spec is None:
                    raise ModuleNotFoundError("Unable to find JIT module.")
                compiled_module = importlib.util.module_from_spec(spec)
                spec.loader.exec_module(compiled_module)
                compiled_objects = [getattr(compiled_module.lib, name) for name in object_names]
                return compiled_objects, compiled_module

            logger.info(f"Waiting for {ready_name} to appear.")
            time.sleep(1)
        raise TimeoutError(f"""JIT compilation timed out, probably due to a failed previous compile.
        Try cleaning cache (e.g. remove {c_filename}) or increase timeout option.""")
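
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of FFCx): the locking protocol used above.
# Opening the .c file with mode "x" is an atomic create-or-fail, so exactly
# one process wins the right to compile; everyone else polls for the
# ".cached" marker that the winner writes when the build finishes. The paths
# below are hypothetical temporaries.
def _demo_exclusive_create_protocol():
    import tempfile
    from pathlib import Path

    cache = Path(tempfile.mkdtemp())
    c_file = cache / "module.c"

    try:
        open(c_file, "x").close()  # atomic: only one process succeeds
        winner = True              # this process must compile, and then
        c_file.with_suffix(".c.cached").touch()  # publish the ready marker
    except FileExistsError:
        winner = False             # someone else is compiling; poll the marker

    return winner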
def _compilation_signature(cffi_extra_compile_args=None, cffi_debug=None):
    """Compute the compilation-inputs part of the signature.

    Used to avoid cache conflicts across Python versions, architectures, installs.

    - SOABI includes platform, Python version, debug flags
    - CFLAGS includes prefixes, arch targets
    """
    return (
        str(cffi_extra_compile_args)
        + str(cffi_debug)
        + sysconfig.get_config_var("CFLAGS")
        + sysconfig.get_config_var("SOABI")
    )


def compile_elements(elements, options=None, cache_dir=None, timeout=10, cffi_extra_compile_args=None,
                     cffi_verbose=False, cffi_debug=None, cffi_libraries=None):
    """Compile a list of UFL elements and dofmaps into Python objects."""
    p = ffcx.options.get_options(options)

    # Get a signature for these elements
    module_name = 'libffcx_elements_' + \
        ffcx.naming.compute_signature(elements, _compute_option_signature(p)
                                      + _compilation_signature(cffi_extra_compile_args, cffi_debug))

    names = []
    for e in elements:
        name = ffcx.naming.finite_element_name(e, module_name)
        names.append(name)
        name = ffcx.naming.dofmap_name(e, module_name)
        names.append(name)

    if cache_dir is not None:
        cache_dir = Path(cache_dir)
        obj, mod = get_cached_module(module_name, names, cache_dir, timeout)
        if obj is not None:
            # Pair up elements with dofmaps
            obj = list(zip(obj[::2], obj[1::2]))
            return obj, mod, (None, None)
    else:
        cache_dir = Path(tempfile.mkdtemp())

    try:
        decl = UFC_HEADER_DECL.format(p["scalar_type"]) + UFC_ELEMENT_DECL + UFC_DOFMAP_DECL

        element_template = "extern ufcx_finite_element {name};\n"
        dofmap_template = "extern ufcx_dofmap {name};\n"

        for i in range(len(elements)):
            decl += element_template.format(name=names[i * 2])
            decl += dofmap_template.format(name=names[i * 2 + 1])

        impl = _compile_objects(decl, elements, names, module_name, p, cache_dir,
                                cffi_extra_compile_args, cffi_verbose, cffi_debug, cffi_libraries)
    except Exception as e:
        try:
            # remove c file so that it will not timeout next time
            c_filename = cache_dir.joinpath(module_name + ".c")
            os.replace(c_filename, c_filename.with_suffix(".c.failed"))
        except Exception:
            pass
        raise e

    objects, module = _load_objects(cache_dir, module_name, names)
    # Pair up elements with dofmaps
    objects = list(zip(objects[::2], objects[1::2]))
    return objects, module, (decl, impl)


def compile_forms(forms, options=None, cache_dir=None, timeout=10, cffi_extra_compile_args=None,
                  cffi_verbose=False, cffi_debug=None, cffi_libraries=None):
    """Compile a list of UFL forms into UFC Python objects."""
    p = ffcx.options.get_options(options)

    # Get a signature for these forms
    module_name = 'libffcx_forms_' + \
        ffcx.naming.compute_signature(forms, _compute_option_signature(p)
                                      + _compilation_signature(cffi_extra_compile_args, cffi_debug))

    form_names = [ffcx.naming.form_name(form, i, module_name) for i, form in enumerate(forms)]

    if cache_dir is not None:
        cache_dir = Path(cache_dir)
        obj, mod = get_cached_module(module_name, form_names, cache_dir, timeout)
        if obj is not None:
            return obj, mod, (None, None)
    else:
        cache_dir = Path(tempfile.mkdtemp())

    try:
        decl = UFC_HEADER_DECL.format(p["scalar_type"]) + UFC_ELEMENT_DECL + UFC_DOFMAP_DECL + \
            UFC_INTEGRAL_DECL + UFC_FORM_DECL

        form_template = "extern ufcx_form {name};\n"
        for name in form_names:
            decl += form_template.format(name=name)

        impl = _compile_objects(decl, forms, form_names, module_name, p, cache_dir,
                                cffi_extra_compile_args, cffi_verbose, cffi_debug, cffi_libraries)
    except Exception as e:
        try:
            # remove c file so that it will not timeout next time
            c_filename = cache_dir.joinpath(module_name + ".c")
            os.replace(c_filename, c_filename.with_suffix(".c.failed"))
        except Exception:
            pass
        raise e

    obj, module = _load_objects(cache_dir, module_name, form_names)
    return obj, module, (decl, impl)


def compile_expressions(expressions, options=None, cache_dir=None, timeout=10, cffi_extra_compile_args=None,
                        cffi_verbose=False, cffi_debug=None, cffi_libraries=None):
    """Compile a list of UFL expressions into UFC Python objects.

    Parameters
    ----------
    expressions
        List of (UFL expression, evaluation points).

    """
    p = ffcx.options.get_options(options)

    module_name = 'libffcx_expressions_' + \
        ffcx.naming.compute_signature(expressions, _compute_option_signature(p)
                                      + _compilation_signature(cffi_extra_compile_args, cffi_debug))
    expr_names = [ffcx.naming.expression_name(expression, module_name) for expression in expressions]

    if cache_dir is not None:
        cache_dir = Path(cache_dir)
        obj, mod = get_cached_module(module_name, expr_names, cache_dir, timeout)
        if obj is not None:
            return obj, mod, (None, None)
    else:
        cache_dir = Path(tempfile.mkdtemp())

    try:
        decl = UFC_HEADER_DECL.format(p["scalar_type"]) + UFC_ELEMENT_DECL + UFC_DOFMAP_DECL + \
            UFC_INTEGRAL_DECL + UFC_FORM_DECL + UFC_EXPRESSION_DECL

        expression_template = "extern ufcx_expression {name};\n"
        for name in expr_names:
            decl += expression_template.format(name=name)

        impl = _compile_objects(decl, expressions, expr_names, module_name, p, cache_dir,
                                cffi_extra_compile_args, cffi_verbose, cffi_debug, cffi_libraries)
    except Exception as e:
        try:
            # remove c file so that it will not timeout next time
            c_filename = cache_dir.joinpath(module_name + ".c")
            os.replace(c_filename, c_filename.with_suffix(".c.failed"))
        except Exception:
            pass
        raise e

    obj, module = _load_objects(cache_dir, module_name, expr_names)
    return obj, module, (decl, impl)


def _compile_objects(decl, ufl_objects, object_names, module_name, options, cache_dir,
                     cffi_extra_compile_args, cffi_verbose, cffi_debug, cffi_libraries):

    import ffcx.compiler

    # JIT uses module_name as prefix, which is needed to make names of all struct/function
    # unique across modules
    _, code_body = ffcx.compiler.compile_ufl_objects(ufl_objects, prefix=module_name, options=options)

    ffibuilder = cffi.FFI()
    ffibuilder.set_source(module_name, code_body, include_dirs=[ffcx.codegeneration.get_include_path()],
                          extra_compile_args=cffi_extra_compile_args, libraries=cffi_libraries)
    ffibuilder.cdef(decl)

    c_filename = cache_dir.joinpath(module_name + ".c")
    ready_name = c_filename.with_suffix(".c.cached")

    # Compile (ensuring that compile dir exists)
    cache_dir.mkdir(exist_ok=True, parents=True)

    logger.info(79 * "#")
    logger.info("Calling JIT C compiler")
    logger.info(79 * "#")

    t0 = time.time()
    f = io.StringIO()

    # Temporarily set root logger handlers to string buffer only
    # since CFFI logs into root logger
    old_handlers = root_logger.handlers.copy()
    root_logger.handlers = [logging.StreamHandler(f)]

    with redirect_stdout(f):
        ffibuilder.compile(tmpdir=cache_dir, verbose=True, debug=cffi_debug)
    s = f.getvalue()
    if cffi_verbose:
        print(s)

    logger.info("JIT C compiler finished in {:.4f}".format(time.time() - t0))

    # Create a "status ready" file. If this fails, it is an error,
    # because it should not exist yet.
# Copy the stdout verbose output of the build into the ready file fd = open(ready_name, "x") fd.write(s) fd.close() # Copy back the original handlers (in case someone is logging into # root logger and has custom handlers) root_logger.handlers = old_handlers return code_body def _load_objects(cache_dir, module_name, object_names): # Create module finder that searches the compile path finder = importlib.machinery.FileFinder( str(cache_dir), (importlib.machinery.ExtensionFileLoader, importlib.machinery.EXTENSION_SUFFIXES)) # Find module. Clear search cache to be sure dynamically created # (new) modules are found finder.invalidate_caches() spec = finder.find_spec(module_name) if spec is None: raise ModuleNotFoundError("Unable to find JIT module.") # Load module compiled_module = importlib.util.module_from_spec(spec) spec.loader.exec_module(compiled_module) compiled_objects = [] for name in object_names: obj = getattr(compiled_module.lib, name) compiled_objects.append(obj) return compiled_objects, compiled_module ffcx-0.7.0/ffcx/codegeneration/lnodes.py000066400000000000000000000606321450721277100202330ustar00rootroot00000000000000# Copyright (C) 2013-2023 Martin Sandve Alnæs, Chris Richardson # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later import numbers import ufl import numpy as np from enum import Enum class PRECEDENCE: """An enum-like class for operator precedence levels.""" HIGHEST = 0 LITERAL = 0 SYMBOL = 0 SUBSCRIPT = 2 NOT = 3 NEG = 3 MUL = 4 DIV = 4 ADD = 5 SUB = 5 LT = 7 LE = 7 GT = 7 GE = 7 EQ = 8 NE = 8 AND = 11 OR = 12 CONDITIONAL = 13 ASSIGN = 13 LOWEST = 15 """LNodes is intended as a minimal generic language description. Formatting is done later, depending on the target language. Supported: Floating point (and complex) and integer variables and multidimensional arrays Range loops Simple arithmetic, +-*/ Math operations Logic conditions Comments Not supported: Pointers Function Calls Flow control (if, switch, while) Booleans Strings """ def is_zero_lexpr(lexpr): return (isinstance(lexpr, LiteralFloat) and lexpr.value == 0.0) or ( isinstance(lexpr, LiteralInt) and lexpr.value == 0 ) def is_one_lexpr(lexpr): return (isinstance(lexpr, LiteralFloat) and lexpr.value == 1.0) or ( isinstance(lexpr, LiteralInt) and lexpr.value == 1 ) def is_negative_one_lexpr(lexpr): return (isinstance(lexpr, LiteralFloat) and lexpr.value == -1.0) or ( isinstance(lexpr, LiteralInt) and lexpr.value == -1 ) def float_product(factors): """Build product of float factors, simplifying ones and zeros and returning 1.0 if empty sequence.""" factors = [f for f in factors if not is_one_lexpr(f)] if len(factors) == 0: return LiteralFloat(1.0) elif len(factors) == 1: return factors[0] else: for f in factors: if is_zero_lexpr(f): return f return Product(factors) class DataType(Enum): """Representation of data types for variables in LNodes. These can be REAL (same type as geometry), SCALAR (same type as tensor), or INT (for entity indices etc.) 
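
    For example, in the FFCx backend quadrature weights and coordinate
    dofs are REAL, coefficient values and the element tensor are SCALAR,
    and quadrature or dof loop indices are INT.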
""" REAL = 0 SCALAR = 1 INT = 2 NONE = 3 def merge_dtypes(dtype0, dtype1): # Promote dtype to SCALAR or REAL if either argument matches if DataType.NONE in (dtype0, dtype1): raise ValueError(f"Invalid DataType in LNodes {dtype0, dtype1}") if DataType.SCALAR in (dtype0, dtype1): return DataType.SCALAR elif DataType.REAL in (dtype0, dtype1): return DataType.REAL elif (dtype0 == DataType.INT and dtype1 == DataType.INT): return DataType.INT else: raise ValueError(f"Can't get dtype for binary operation with {dtype0, dtype1}") class LNode(object): """Base class for all AST nodes.""" def __eq__(self, other): name = self.__class__.__name__ raise NotImplementedError("Missing implementation of __eq__ in " + name) def __ne__(self, other): return not self.__eq__(other) class LExpr(LNode): """Base class for all expressions. All subtypes should define a 'precedence' class attribute. """ dtype = DataType.NONE def __getitem__(self, indices): return ArrayAccess(self, indices) def __neg__(self): if isinstance(self, LiteralFloat): return LiteralFloat(-self.value) if isinstance(self, LiteralInt): return LiteralInt(-self.value) return Neg(self) def __add__(self, other): other = as_lexpr(other) if is_zero_lexpr(self): return other if is_zero_lexpr(other): return self if isinstance(other, Neg): return Sub(self, other.arg) return Add(self, other) def __radd__(self, other): other = as_lexpr(other) if is_zero_lexpr(self): return other if is_zero_lexpr(other): return self if isinstance(self, Neg): return Sub(other, self.arg) return Add(other, self) def __sub__(self, other): other = as_lexpr(other) if is_zero_lexpr(self): return -other if is_zero_lexpr(other): return self if isinstance(other, Neg): return Add(self, other.arg) if isinstance(self, LiteralInt) and isinstance(other, LiteralInt): return LiteralInt(self.value - other.value) return Sub(self, other) def __rsub__(self, other): other = as_lexpr(other) if is_zero_lexpr(self): return other if is_zero_lexpr(other): return -self if isinstance(self, Neg): return Add(other, self.arg) return Sub(other, self) def __mul__(self, other): other = as_lexpr(other) if is_zero_lexpr(self): return self if is_zero_lexpr(other): return other if is_one_lexpr(self): return other if is_one_lexpr(other): return self if is_negative_one_lexpr(other): return Neg(self) if is_negative_one_lexpr(self): return Neg(other) if isinstance(self, LiteralInt) and isinstance(other, LiteralInt): return LiteralInt(self.value * other.value) return Mul(self, other) def __rmul__(self, other): other = as_lexpr(other) if is_zero_lexpr(self): return self if is_zero_lexpr(other): return other if is_one_lexpr(self): return other if is_one_lexpr(other): return self if is_negative_one_lexpr(other): return Neg(self) if is_negative_one_lexpr(self): return Neg(other) return Mul(other, self) def __div__(self, other): other = as_lexpr(other) if is_zero_lexpr(other): raise ValueError("Division by zero!") if is_zero_lexpr(self): return self return Div(self, other) def __rdiv__(self, other): other = as_lexpr(other) if is_zero_lexpr(self): raise ValueError("Division by zero!") if is_zero_lexpr(other): return other return Div(other, self) # TODO: Error check types? 
    __truediv__ = __div__
    __rtruediv__ = __rdiv__
    __floordiv__ = __div__
    __rfloordiv__ = __rdiv__


class LExprOperator(LExpr):
    """Base class for all expression operators."""

    sideeffect = False


class LExprTerminal(LExpr):
    """Base class for all expression terminals."""

    sideeffect = False


# LExprTerminal types


class LiteralFloat(LExprTerminal):
    """A floating point literal value."""

    precedence = PRECEDENCE.LITERAL

    def __init__(self, value):
        assert isinstance(value, (float, complex))
        self.value = value
        if isinstance(value, complex):
            self.dtype = DataType.SCALAR
        else:
            self.dtype = DataType.REAL

    def __eq__(self, other):
        return isinstance(other, LiteralFloat) and self.value == other.value

    def __float__(self):
        return float(self.value)

    def __repr__(self):
        return str(self.value)


class LiteralInt(LExprTerminal):
    """An integer literal value."""

    precedence = PRECEDENCE.LITERAL

    def __init__(self, value):
        assert isinstance(value, (int, np.number))
        self.value = value
        self.dtype = DataType.INT

    def __eq__(self, other):
        return isinstance(other, LiteralInt) and self.value == other.value

    def __hash__(self):
        return hash(self.value)

    def __repr__(self):
        return str(self.value)


class Symbol(LExprTerminal):
    """A named symbol."""

    precedence = PRECEDENCE.SYMBOL

    def __init__(self, name: str, dtype):
        assert isinstance(name, str)
        assert name.replace("_", "").isalnum()
        self.name = name
        self.dtype = dtype

    def __eq__(self, other):
        return isinstance(other, Symbol) and self.name == other.name

    def __hash__(self):
        return hash(self.name)

    def __repr__(self):
        return self.name


class MultiIndex(LExpr):
    """A multi-index for accessing tensors flattened in memory."""

    def __init__(self, symbols: list, sizes: list):
        self.dtype = DataType.INT
        self.sizes = sizes
        self.symbols = [as_lexpr(sym) for sym in symbols]
        for sym in self.symbols:
            assert sym.dtype == DataType.INT

        dim = len(sizes)
        if dim == 0:
            self.global_index: LExpr = LiteralInt(0)
        else:
            # Row-major (C) strides: the last index varies fastest
            stride = [np.prod(sizes[i:]) for i in range(dim)] + [LiteralInt(1)]
            self.global_index = Sum(n * sym for n, sym in zip(stride[1:], symbols))

    def size(self):
        return np.prod(self.sizes)

    def local_index(self, idx):
        assert idx < len(self.symbols)
        return self.symbols[idx]

    def intersection(self, other):
        symbols = []
        sizes = []
        for (sym, size) in zip(self.symbols, self.sizes):
            if sym in other.symbols:
                i = other.symbols.index(sym)
                assert other.sizes[i] == size
                symbols.append(sym)
                sizes.append(size)
        return MultiIndex(symbols, sizes)

    def union(self, other):
        # NB: result may depend on order, a.union(b) != b.union(a)
        symbols = self.symbols.copy()
        sizes = self.sizes.copy()
        for (sym, size) in zip(other.symbols, other.sizes):
            if sym in symbols:
                i = symbols.index(sym)
                assert sizes[i] == size
            else:
                symbols.append(sym)
                sizes.append(size)
        return MultiIndex(symbols, sizes)

    def difference(self, other):
        symbols = []
        sizes = []
        for (idx, size) in zip(self.symbols, self.sizes):
            if idx not in other.symbols:
                symbols.append(idx)
                sizes.append(size)
        return MultiIndex(symbols, sizes)

    def __hash__(self):
        # Hash on the index symbols: the flattened global_index expression
        # (a Sum) is not itself hashable.
        return hash(tuple(self.symbols))


class PrefixUnaryOp(LExprOperator):
    """Base class for unary operators."""

    def __init__(self, arg):
        self.arg = as_lexpr(arg)

    def __eq__(self, other):
        return isinstance(other, type(self)) and self.arg == other.arg


class BinOp(LExprOperator):
    def __init__(self, lhs, rhs):
        self.lhs = as_lexpr(lhs)
        self.rhs = as_lexpr(rhs)

    def __eq__(self, other):
        return (
            isinstance(other, type(self))
            and self.lhs == other.lhs
            and self.rhs == other.rhs
        )

    def __hash__(self):
        return hash(self.lhs) + hash(self.rhs)

    def __repr__(self):
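        # Parenthesise unconditionally so the printed form never depends on
        # operator precedence.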
return f"({self.lhs} {self.op} {self.rhs})" class ArithmeticBinOp(BinOp): def __init__(self, lhs, rhs): self.lhs = as_lexpr(lhs) self.rhs = as_lexpr(rhs) self.dtype = merge_dtypes(self.lhs.dtype, self.rhs.dtype) class NaryOp(LExprOperator): """Base class for special n-ary operators.""" op = "" def __init__(self, args): self.args = [as_lexpr(arg) for arg in args] def __eq__(self, other): return ( isinstance(other, type(self)) and len(self.args) == len(other.args) and all(a == b for a, b in zip(self.args, other.args)) ) def __repr__(self) -> str: return f"{self.op} ".join(f"{i} " for i in self.args) class Neg(PrefixUnaryOp): precedence = PRECEDENCE.NEG op = "-" def __init__(self, arg): self.arg = as_lexpr(arg) self.dtype = self.arg.dtype class Not(PrefixUnaryOp): precedence = PRECEDENCE.NOT op = "!" # Binary operators # Arithmetic operators preserve the dtype of their operands # The other operations (logical) do not need a dtype class Add(ArithmeticBinOp): precedence = PRECEDENCE.ADD op = "+" class Sub(ArithmeticBinOp): precedence = PRECEDENCE.SUB op = "-" class Mul(ArithmeticBinOp): precedence = PRECEDENCE.MUL op = "*" class Div(ArithmeticBinOp): precedence = PRECEDENCE.DIV op = "/" class EQ(BinOp): precedence = PRECEDENCE.EQ op = "==" class NE(BinOp): precedence = PRECEDENCE.NE op = "!=" class LT(BinOp): precedence = PRECEDENCE.LT op = "<" class GT(BinOp): precedence = PRECEDENCE.GT op = ">" class LE(BinOp): precedence = PRECEDENCE.LE op = "<=" class GE(BinOp): precedence = PRECEDENCE.GE op = ">=" class And(BinOp): precedence = PRECEDENCE.AND op = "&&" class Or(BinOp): precedence = PRECEDENCE.OR op = "||" class Sum(NaryOp): """Sum of any number of operands.""" precedence = PRECEDENCE.ADD op = "+" class Product(NaryOp): """Product of any number of operands.""" precedence = PRECEDENCE.MUL op = "*" class MathFunction(LExprOperator): """A Math Function, with any arguments.""" precedence = PRECEDENCE.HIGHEST def __init__(self, func, args): self.function = func self.args = [as_lexpr(arg) for arg in args] self.dtype = self.args[0].dtype def __eq__(self, other): return ( isinstance(other, type(self)) and self.function == other.function and len(self.args) == len(other.args) and all(a == b for a, b in zip(self.args, other.args)) ) class AssignOp(BinOp): """Base class for assignment operators.""" precedence = PRECEDENCE.ASSIGN sideeffect = True def __init__(self, lhs, rhs): assert isinstance(lhs, LNode) BinOp.__init__(self, lhs, rhs) class Assign(AssignOp): op = "=" class AssignAdd(AssignOp): op = "+=" class AssignSub(AssignOp): op = "-=" class AssignMul(AssignOp): op = "*=" class AssignDiv(AssignOp): op = "/=" class ArrayAccess(LExprOperator): precedence = PRECEDENCE.SUBSCRIPT def __init__(self, array, indices): # Typecheck array argument if isinstance(array, Symbol): self.array = array self.dtype = array.dtype elif isinstance(array, ArrayDecl): self.array = array.symbol self.dtype = array.symbol.dtype else: raise ValueError("Unexpected array type %s." 
% (type(array).__name__,)) # Allow expressions or literals as indices if not isinstance(indices, (list, tuple)): indices = (indices,) self.indices = tuple(as_lexpr(i) for i in indices) # Early error checking for negative array dimensions if any(isinstance(i, int) and i < 0 for i in self.indices): raise ValueError("Index value < 0.") # Additional dimension checks possible if we get an ArrayDecl instead of just a name if isinstance(array, ArrayDecl): if len(self.indices) != len(array.sizes): raise ValueError("Invalid number of indices.") ints = (int, LiteralInt) if any( (isinstance(i, ints) and isinstance(d, ints) and int(i) >= int(d)) for i, d in zip(self.indices, array.sizes) ): raise ValueError("Index value >= array dimension.") def __getitem__(self, indices): """Handle nested expr[i][j].""" if isinstance(indices, list): indices = tuple(indices) elif not isinstance(indices, tuple): indices = (indices,) return ArrayAccess(self.array, self.indices + indices) def __eq__(self, other): return ( isinstance(other, type(self)) and self.array == other.array and self.indices == other.indices ) def __hash__(self): return hash(self.array) def __repr__(self): return str(self.array) + "[" + ", ".join(str(i) for i in self.indices) + "]" class Conditional(LExprOperator): precedence = PRECEDENCE.CONDITIONAL def __init__(self, condition, true, false): self.condition = as_lexpr(condition) self.true = as_lexpr(true) self.false = as_lexpr(false) self.dtype = merge_dtypes(self.true.dtype, self.false.dtype) def __eq__(self, other): return ( isinstance(other, type(self)) and self.condition == other.condition and self.true == other.true and self.false == other.false ) def as_lexpr(node): """Typechecks and wraps an object as a valid LExpr. Accepts LExpr nodes, treats int and float as literals. """ if isinstance(node, LExpr): return node elif isinstance(node, numbers.Integral): return LiteralInt(node) elif isinstance(node, numbers.Real): return LiteralFloat(node) else: raise RuntimeError("Unexpected LExpr type %s:\n%s" % (type(node), str(node))) class Statement(LNode): """Make an expression into a statement.""" is_scoped = False def __init__(self, expr): self.expr = as_lexpr(expr) def __eq__(self, other): return isinstance(other, type(self)) and self.expr == other.expr def as_statement(node): """Perform type checking on node and wrap in a suitable statement type if necessary.""" if isinstance(node, StatementList) and len(node.statements) == 1: # Cleans up the expression tree a bit return node.statements[0] elif isinstance(node, Statement): # No-op return node elif isinstance(node, LExprOperator): if node.sideeffect: # Special case for using assignment expressions as statements return Statement(node) else: raise RuntimeError( "Trying to create a statement of lexprOperator type %s:\n%s" % (type(node), str(node)) ) elif isinstance(node, list): # Convenience case for list of statements if len(node) == 1: # Cleans up the expression tree a bit return as_statement(node[0]) else: return StatementList(node) else: raise RuntimeError( "Unexpected Statement type %s:\n%s" % (type(node), str(node)) ) class StatementList(LNode): """A simple sequence of statements. 
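
    Used e.g. to hold the body of a ForRange loop.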
    No new scopes are introduced.
    """

    def __init__(self, statements):
        self.statements = [as_statement(st) for st in statements]

    @property
    def is_scoped(self):
        return all(st.is_scoped for st in self.statements)

    def __eq__(self, other):
        return isinstance(other, type(self)) and self.statements == other.statements


class Comment(Statement):
    """Line comment(s) used for annotating the generated code with human-readable remarks."""

    is_scoped = True

    def __init__(self, comment):
        assert isinstance(comment, str)
        self.comment = comment

    def __eq__(self, other):
        return isinstance(other, type(self)) and self.comment == other.comment


def commented_code_list(code, comments):
    """Add comment to code list if the list is not empty."""
    if isinstance(code, LNode):
        code = [code]
    assert isinstance(code, list)
    if code:
        if not isinstance(comments, (list, tuple)):
            comments = [comments]
        comments = [Comment(c) for c in comments]
        code = comments + code
    return code


# Type and variable declarations


class VariableDecl(Statement):
    """Declare a variable, optionally defining an initial value."""

    is_scoped = False

    def __init__(self, symbol, value=None):
        assert isinstance(symbol, Symbol)
        assert symbol.dtype is not None
        self.symbol = symbol

        if value is not None:
            value = as_lexpr(value)
        self.value = value

    def __eq__(self, other):
        return (
            isinstance(other, type(self))
            and self.symbol == other.symbol
            and self.value == other.value
        )


class ArrayDecl(Statement):
    """A declaration or definition of an array.

    Note that just setting values=0 is sufficient to initialize the
    entire array to zero.

    Otherwise use nested lists of lists to represent multidimensional
    array values to initialize to.
    """

    is_scoped = False

    def __init__(self, symbol, sizes=None, values=None, const=False):
        assert isinstance(symbol, Symbol)
        self.symbol = symbol
        assert symbol.dtype

        if sizes is None:
            assert values is not None
            sizes = values.shape
        if isinstance(sizes, int):
            sizes = (sizes,)
        self.sizes = tuple(sizes)

        if values is None:
            assert sizes is not None

        # NB! No type checking, assuming nested lists of literal values. Not applying as_lexpr.
        if isinstance(values, (list, tuple)):
            self.values = np.asarray(values)
        else:
            self.values = values

        self.const = const

    def __eq__(self, other):
        # Use np.array_equal so NumPy-valued initializers do not raise on
        # truth-testing.
        return (
            isinstance(other, type(self))
            and self.symbol == other.symbol
            and self.sizes == other.sizes
            and np.array_equal(self.values, other.values)
            and self.const == other.const
        )


def is_simple_inner_loop(code):
    if isinstance(code, ForRange) and is_simple_inner_loop(code.body):
        return True
    if isinstance(code, Statement) and isinstance(code.expr, AssignOp):
        return True
    return False


class ForRange(Statement):
    """For loop over a range, incrementing an integer index."""

    is_scoped = True

    def __init__(self, index, begin, end, body):
        assert isinstance(index, Symbol)
        self.index = index
        self.begin = as_lexpr(begin)
        self.end = as_lexpr(end)
        assert isinstance(body, list)
        self.body = StatementList(body)

    def __eq__(self, other):
        attributes = ("index", "begin", "end", "body")
        return isinstance(other, type(self)) and all(
            getattr(self, name) == getattr(other, name) for name in attributes
        )


def _math_function(op, *args):
    name = op._ufl_handler_name_
    dtype = args[0].dtype

    if name in ("conj", "real") and dtype == DataType.REAL:
        assert len(args) == 1
        return args[0]
    if name == "imag" and dtype == DataType.REAL:
        assert len(args) == 1
        return LiteralFloat(0.0)
    return MathFunction(name, args)


# Lookup table for handler to call when the ufl_to_lnodes method (below) is
# called, depending on the first argument type.
_ufl_call_lookup = {
    ufl.constantvalue.IntValue: lambda x: LiteralInt(int(x)),
    ufl.constantvalue.FloatValue: lambda x: LiteralFloat(float(x)),
    ufl.constantvalue.ComplexValue: lambda x: LiteralFloat(x.value()),
    ufl.constantvalue.Zero: lambda x: LiteralFloat(0.0),
    ufl.algebra.Product: lambda x, a, b: a * b,
    ufl.algebra.Sum: lambda x, a, b: a + b,
    ufl.algebra.Division: lambda x, a, b: a / b,
    ufl.algebra.Abs: _math_function,
    ufl.algebra.Power: _math_function,
    ufl.algebra.Real: _math_function,
    ufl.algebra.Imag: _math_function,
    ufl.algebra.Conj: _math_function,
    ufl.classes.GT: lambda x, a, b: GT(a, b),
    ufl.classes.GE: lambda x, a, b: GE(a, b),
    ufl.classes.EQ: lambda x, a, b: EQ(a, b),
    ufl.classes.NE: lambda x, a, b: NE(a, b),
    ufl.classes.LT: lambda x, a, b: LT(a, b),
    ufl.classes.LE: lambda x, a, b: LE(a, b),
    ufl.classes.AndCondition: lambda x, a, b: And(a, b),
    ufl.classes.OrCondition: lambda x, a, b: Or(a, b),
    ufl.classes.NotCondition: lambda x, a: Not(a),
    ufl.classes.Conditional: lambda x, c, t, f: Conditional(c, t, f),
    ufl.classes.MinValue: _math_function,
    ufl.classes.MaxValue: _math_function,
    ufl.mathfunctions.Sqrt: _math_function,
    ufl.mathfunctions.Ln: _math_function,
    ufl.mathfunctions.Exp: _math_function,
    ufl.mathfunctions.Cos: _math_function,
    ufl.mathfunctions.Sin: _math_function,
    ufl.mathfunctions.Tan: _math_function,
    ufl.mathfunctions.Cosh: _math_function,
    ufl.mathfunctions.Sinh: _math_function,
    ufl.mathfunctions.Tanh: _math_function,
    ufl.mathfunctions.Acos: _math_function,
    ufl.mathfunctions.Asin: _math_function,
    ufl.mathfunctions.Atan: _math_function,
    ufl.mathfunctions.Erf: _math_function,
    ufl.mathfunctions.Atan2: _math_function,
    ufl.mathfunctions.MathFunction: _math_function,
    ufl.mathfunctions.BesselJ: _math_function,
    ufl.mathfunctions.BesselY: _math_function}


def ufl_to_lnodes(operator, *args):
    # Call appropriate handler, depending on the type of operator
    optype = type(operator)
    if optype in _ufl_call_lookup:
        return _ufl_call_lookup[optype](operator, *args)
    else:
        raise RuntimeError(f"Missing lookup for expr type {optype}.")
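
# Minimal usage sketch (illustrative only, not part of the FFCx API): build
# the AST for   for (iq = 0; iq < 4; ++iq)  A[iq] += w[iq] * 2.0;
# Rendering such an AST into C source is done by a separate formatter module,
# not by this file.
if __name__ == "__main__":
    A = Symbol("A", dtype=DataType.SCALAR)
    w = Symbol("w", dtype=DataType.SCALAR)
    iq = Symbol("iq", dtype=DataType.INT)
    # Operator overloading on LExpr builds Mul/Add nodes and simplifies
    # literal ones and zeros automatically, cf. float_product above.
    rhs = w[iq] * LiteralFloat(2.0)
    loop = ForRange(iq, 0, 4, [AssignAdd(A[iq], rhs)])
    # The SCALAR dtype propagates through the arithmetic operators
    assert loop.body.statements[0].expr.rhs.dtype == DataType.SCALAR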
ffcx-0.7.0/ffcx/codegeneration/symbols.py000066400000000000000000000167651450721277100204470ustar00rootroot00000000000000# Copyright (C) 2011-2017 Martin Sandve Alnæs # # This file is part of FFCx. (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """FFCx/UFC specific symbol naming.""" import logging import ufl import ffcx.codegeneration.lnodes as L logger = logging.getLogger("ffcx") # TODO: Get restriction postfix from somewhere central def ufcx_restriction_postfix(restriction): if restriction == "+": res = "_0" elif restriction == "-": res = "_1" else: res = "" return res def format_mt_name(basename, mt): """Format variable name for modified terminal.""" access = str(basename) # Add averaged state to name if mt.averaged is not None: avg = f"_a{mt.averaged}" access += avg # Format restriction res = ufcx_restriction_postfix(mt.restriction).replace("_", "_r") access += res # Format global derivatives if mt.global_derivatives: assert basename == "J" der = f"_deriv_{''.join(map(str, mt.global_derivatives))}" access += der # Format local derivatives if mt.local_derivatives: # Convert "listing" derivative multindex into "counting" representation gdim = ufl.domain.extract_unique_domain(mt.terminal).geometric_dimension() ld_counting = tuple(mt.local_derivatives.count(i) for i in range(gdim)) der = f"_d{''.join(map(str, ld_counting))}" access += der # Add flattened component to name if mt.component: comp = f"_c{mt.flat_component}" access += comp return access class FFCXBackendSymbols(object): """FFCx specific symbol definitions. Provides non-ufl symbols.""" def __init__(self, coefficient_numbering, coefficient_offsets, original_constant_offsets): self.coefficient_numbering = coefficient_numbering self.coefficient_offsets = coefficient_offsets self.original_constant_offsets = original_constant_offsets # Keep tabs on tables, so the symbols can be reused self.quadrature_weight_tables = {} self.element_tables = {} # Reusing a single symbol for all quadrature loops, assumed not to be nested. self.quadrature_loop_index = L.Symbol("iq", dtype=L.DataType.INT) # Symbols for the tabulate_tensor function arguments self.element_tensor = L.Symbol("A", dtype=L.DataType.SCALAR) self.coefficients = L.Symbol("w", dtype=L.DataType.SCALAR) self.constants = L.Symbol("c", dtype=L.DataType.SCALAR) self.coordinate_dofs = L.Symbol("coordinate_dofs", dtype=L.DataType.REAL) self.entity_local_index = L.Symbol("entity_local_index", dtype=L.DataType.INT) self.quadrature_permutation = L.Symbol("quadrature_permutation", dtype=L.DataType.INT) # Index for loops over coefficient dofs, assumed to never be used in two nested loops. self.coefficient_dof_sum_index = L.Symbol("ic", dtype=L.DataType.INT) # Table for chunk of custom quadrature weights (including cell measure scaling). self.custom_weights_table = L.Symbol("weights_chunk", dtype=L.DataType.REAL) # Table for chunk of custom quadrature points (physical coordinates). 
self.custom_points_table = L.Symbol("points_chunk", dtype=L.DataType.REAL) def entity(self, entitytype, restriction): """Entity index for lookup in element tables.""" if entitytype == "cell": # Always 0 for cells (even with restriction) return L.LiteralInt(0) if entitytype == "facet": if restriction == "-": return self.entity_local_index[1] else: return self.entity_local_index[0] elif entitytype == "vertex": return self.entity_local_index[0] else: logging.exception(f"Unknown entitytype {entitytype}") def argument_loop_index(self, iarg): """Loop index for argument #iarg.""" indices = ["i", "j", "k", "l"] return L.Symbol(indices[iarg], dtype=L.DataType.INT) def weights_table(self, quadrature_rule): """Table of quadrature weights.""" key = f"weights_{quadrature_rule.id()}" if key not in self.quadrature_weight_tables: self.quadrature_weight_tables[key] = L.Symbol(f"weights_{quadrature_rule.id()}", dtype=L.DataType.REAL) return self.quadrature_weight_tables[key] def points_table(self, quadrature_rule): """Table of quadrature points (points on the reference integration entity).""" return L.Symbol(f"points_{quadrature_rule.id()}", dtype=L.DataType.REAL) def x_component(self, mt): """Physical coordinate component.""" return L.Symbol(format_mt_name("x", mt), dtype=L.DataType.REAL) def J_component(self, mt): """Jacobian component.""" # FIXME: Add domain number! return L.Symbol(format_mt_name("J", mt), dtype=L.DataType.REAL) def domain_dof_access(self, dof, component, gdim, num_scalar_dofs, restriction): # FIXME: Add domain number or offset! offset = 0 if restriction == "-": offset = num_scalar_dofs * 3 return self.coordinate_dofs[3 * dof + component + offset] def domain_dofs_access(self, gdim, num_scalar_dofs, restriction): # FIXME: Add domain number or offset! 
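        # Coordinate dofs are stored as coordinate_dofs[num_dofs][3]: always
        # three components per point (the trailing entries are unused when
        # gdim < 3), hence the fixed stride of 3 in domain_dof_access above;
        # the "-" restriction block starts at num_scalar_dofs * 3.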
return [ self.domain_dof_access(dof, component, gdim, num_scalar_dofs, restriction) for dof in range(num_scalar_dofs) for component in range(gdim) ] def coefficient_dof_access(self, coefficient, dof_index): offset = self.coefficient_offsets[coefficient] w = self.coefficients return w[offset + dof_index] def coefficient_dof_access_blocked(self, coefficient: ufl.Coefficient, index, block_size, dof_offset): coeff_offset = self.coefficient_offsets[coefficient] w = self.coefficients _w = L.Symbol(f"_w_{coeff_offset}_{dof_offset}", dtype=L.DataType.SCALAR) unit_stride_access = _w[index] original_access = w[coeff_offset + index * block_size + dof_offset] return unit_stride_access, original_access def coefficient_value(self, mt): """Symbol for variable holding value or derivative component of coefficient.""" c = self.coefficient_numbering[mt.terminal] return L.Symbol(format_mt_name("w%d" % (c, ), mt), dtype=L.DataType.SCALAR) def constant_index_access(self, constant, index): offset = self.original_constant_offsets[constant] c = self.constants return c[offset + index] def element_table(self, tabledata, entitytype, restriction): entity = self.entity(entitytype, restriction) if tabledata.is_uniform: entity = 0 else: entity = self.entity(entitytype, restriction) if tabledata.is_piecewise: iq = 0 else: iq = self.quadrature_loop_index if tabledata.is_permuted: qp = self.quadrature_permutation[0] if restriction == "-": qp = self.quadrature_permutation[1] else: qp = 0 # Return direct access to element table, reusing symbol if possible if tabledata.name not in self.element_tables: self.element_tables[tabledata.name] = L.Symbol(tabledata.name, dtype=L.DataType.REAL) return self.element_tables[tabledata.name][qp][entity][iq] ffcx-0.7.0/ffcx/codegeneration/ufcx.h000066400000000000000000000360661450721277100175170ustar00rootroot00000000000000/// This is UFCx /// This code is released into the public domain. /// /// The FEniCS Project (http://www.fenicsproject.org/) 2006-2021. /// /// UFCx defines the interface between code generated by FFCx and the /// DOLFINx C++ library. Changes here must be reflected both in the FFCx /// code generation and in the DOLFINx library calls. #pragma once #define UFCX_VERSION_MAJOR 0 #define UFCX_VERSION_MINOR 7 #define UFCX_VERSION_MAINTENANCE 0 #define UFCX_VERSION_RELEASE 1 #if UFCX_VERSION_RELEASE #define UFCX_VERSION \ UFCX_VERSION_MAJOR "." UFCX_VERSION_MINOR "." UFCX_VERSION_MAINTENANCE #else #define UFCX_VERSION \ UFCX_VERSION_MAJOR "." UFCX_VERSION_MINOR "." 
  UFCX_VERSION_MAJOR "." UFCX_VERSION_MINOR "." UFCX_VERSION_MAINTENANCE ".dev0"
#endif

#include <stdbool.h>
#include <stdint.h>

#ifdef __cplusplus
extern "C"
{
#if defined(__clang__)
#define restrict
#elif defined(__GNUC__) || defined(__GNUG__)
#define restrict __restrict__
#else
#define restrict
#endif // restrict
#endif // __cplusplus

  //

  typedef enum
  {
    interval = 10,
    triangle = 20,
    quadrilateral = 30,
    tetrahedron = 40,
    hexahedron = 50,
    vertex = 60,
    prism = 70,
    pyramid = 80
  } ufcx_shape;

  typedef enum
  {
    cell = 0,
    exterior_facet = 1,
    interior_facet = 2
  } ufcx_integral_type;

  typedef enum
  {
    ufcx_basix_element = 0,
    ufcx_mixed_element = 1,
    ufcx_quadrature_element = 2,
    ufcx_basix_custom_element = 3,
    ufcx_real_element = 4,
  } ufcx_element_type;

  /// Forward declarations
  typedef struct ufcx_finite_element ufcx_finite_element;
  typedef struct ufcx_basix_custom_finite_element ufcx_basix_custom_finite_element;
  typedef struct ufcx_dofmap ufcx_dofmap;
  typedef struct ufcx_function_space ufcx_function_space;

  //

  typedef struct ufcx_finite_element
  {
    /// String identifying the finite element
    const char* signature;

    /// Cell shape
    ufcx_shape cell_shape;

    /// Element type
    ufcx_element_type element_type;

    /// Topological dimension of the cell
    int topological_dimension;

    /// Geometric dimension of the cell
    int geometric_dimension;

    /// Dimension of the finite element function space
    int space_dimension;

    /// Rank of the value space
    int value_rank;

    /// Dimension of the value space for axis i
    int* value_shape;

    /// Number of components of the value space
    int value_size;

    /// Rank of the reference value space
    int reference_value_rank;

    /// Dimension of the reference value space for axis i
    int* reference_value_shape;

    /// Number of components of the reference value space
    int reference_value_size;

    /// Maximum polynomial degree of the finite element function space
    int degree;

    /// Block size for a VectorElement. For a TensorElement, this is the
    /// product of the tensor's dimensions
    int block_size;

    /// Family of the finite element function space
    const char* family;

    /// Basix identifier of the family of the finite element function space
    int basix_family;

    /// Basix identifier of the cell shape
    int basix_cell;

    /// Indicates whether or not this is the discontinuous version of the element
    bool discontinuous;

    /// The Lagrange variant to be passed to Basix's create_element function
    int lagrange_variant;

    /// The DPC variant to be passed to Basix's create_element function
    int dpc_variant;

    /// Number of sub elements (for a mixed element)
    int num_sub_elements;

    /// Get a finite element for sub element i (for a mixed
    /// element).
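    /// The array has length num_sub_elements.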
ufcx_finite_element** sub_elements; /// Pointer to data to recreate the element if it is a custom Basix element ufcx_basix_custom_finite_element* custom_element; } ufcx_finite_element; typedef struct ufcx_basix_custom_finite_element { /// Basix identifier of the cell shape int cell_type; /// Dimension of the value space for axis i int value_shape_length; /// Dimension of the value space for axis i int* value_shape; /// The number of rows in the wcoeffs matrix int wcoeffs_rows; /// The number of columns in the wcoeffs matrix int wcoeffs_cols; /// The coefficients that define the polynomial set of the element in terms /// of the orthonormal polynomials on the cell double* wcoeffs; /// The number of interpolation points associated with each entity int* npts; /// The number of DOFs associated with each entity int* ndofs; // The coordinates of the interpolation points double* x; // The entries in the interpolation matrices double* M; /// The map type for the element int map_type; /// The Sobolev space for the element int sobolev_space; /// Indicates whether or not this is the discontinuous version of the element bool discontinuous; /// The highest degree full polynomial space contained in this element int highest_complete_degree; /// The number of derivatives needed when interpolating int interpolation_nderivs; /// The highest degree of a polynomial in the element int highest_degree; /// The polyset type of the element int polyset_type; } ufcx_basix_custom_finite_element; typedef struct ufcx_dofmap { /// String identifying the dofmap const char* signature; /// Number of dofs with global support (i.e. global constants) int num_global_support_dofs; /// Dimension of the local finite element function space for a cell /// (not including global support dofs) int num_element_support_dofs; /// Return the block size for a VectorElement or TensorElement int block_size; /// Flattened list of dofs associated with each entity int *entity_dofs; /// Offset for dofs of each entity in entity_dofs int *entity_dof_offsets; /// Flattened list of closure dofs associated with each entity int *entity_closure_dofs; /// Offset for closure dofs of each entity in entity_closure_dofs int *entity_closure_dof_offsets; /// Number of dofs associated with each cell entity of dimension d int *num_entity_dofs; /// Tabulate the local-to-local mapping of dofs on entity (d, i) void (*tabulate_entity_dofs)(int* restrict dofs, int d, int i); /// Number of dofs associated with the closure of each cell entity of dimension d int *num_entity_closure_dofs; /// Tabulate the local-to-local mapping of dofs on the closure of entity (d, i) void (*tabulate_entity_closure_dofs)(int* restrict dofs, int d, int i); /// Number of sub dofmaps (for a mixed element) int num_sub_dofmaps; /// Get a dofmap for sub dofmap i (for a mixed element) ufcx_dofmap** sub_dofmaps; } ufcx_dofmap; /// Tabulate integral into tensor A with compiled quadrature rule /// /// @param[out] A /// @param[in] w Coefficients attached to the form to which the /// tabulated integral belongs. /// /// Dimensions: w[coefficient][restriction][dof]. /// /// Restriction dimension /// applies to interior facet integrals, where coefficients restricted /// to both cells sharing the facet must be provided. /// @param[in] c Constants attached to the form to which the tabulated /// integral belongs. Dimensions: c[constant][dim]. /// @param[in] coordinate_dofs Values of degrees of freedom of /// coordinate element. Defines the geometry of the cell. 
Dimensions: /// coordinate_dofs[restriction][num_dofs][3]. Restriction /// dimension applies to interior facet integrals, where cell /// geometries for both cells sharing the facet must be provided. /// @param[in] entity_local_index Local index of mesh entity on which /// to tabulate. This applies to facet integrals. /// @param[in] quadrature_permutation For facet integrals, numbers to /// indicate the permutation to be applied to each side of the facet /// to make the orientations of the faces matched up should be passed /// in. If an integer of value N is passed in, then: /// /// - floor(N / 2) gives the number of rotations to apply to the /// facet /// - N % 2 gives the number of reflections to apply to the facet /// /// For integrals not on interior facets, this argument has no effect and a /// null pointer can be passed. For interior facets the array will have size 2 /// (one permutation for each cell adjacent to the facet). typedef void(ufcx_tabulate_tensor_float32)( float* restrict A, const float* restrict w, const float* restrict c, const float* restrict coordinate_dofs, const int* restrict entity_local_index, const uint8_t* restrict quadrature_permutation); /// Tabulate integral into tensor A with compiled /// quadrature rule and double precision /// /// @see ufcx_tabulate_tensor_single typedef void(ufcx_tabulate_tensor_float64)( double* restrict A, const double* restrict w, const double* restrict c, const double* restrict coordinate_dofs, const int* restrict entity_local_index, const uint8_t* restrict quadrature_permutation); /// Tabulate integral into tensor A with compiled /// quadrature rule and extended double precision /// /// @see ufcx_tabulate_tensor_single typedef void(ufcx_tabulate_tensor_longdouble)( long double* restrict A, const long double* restrict w, const long double* restrict c, const long double* restrict coordinate_dofs, const int* restrict entity_local_index, const uint8_t* restrict quadrature_permutation); /// Tabulate integral into tensor A with compiled /// quadrature rule and complex single precision /// /// @see ufcx_tabulate_tensor_single typedef void(ufcx_tabulate_tensor_complex64)( float _Complex* restrict A, const float _Complex* restrict w, const float _Complex* restrict c, const float* restrict coordinate_dofs, const int* restrict entity_local_index, const uint8_t* restrict quadrature_permutation); /// Tabulate integral into tensor A with compiled /// quadrature rule and complex double precision /// /// @see ufcx_tabulate_tensor_single typedef void(ufcx_tabulate_tensor_complex128)( double _Complex* restrict A, const double _Complex* restrict w, const double _Complex* restrict c, const double* restrict coordinate_dofs, const int* restrict entity_local_index, const uint8_t* restrict quadrature_permutation); typedef struct ufcx_integral { const bool* enabled_coefficients; ufcx_tabulate_tensor_float32* tabulate_tensor_float32; ufcx_tabulate_tensor_float64* tabulate_tensor_float64; ufcx_tabulate_tensor_longdouble* tabulate_tensor_longdouble; ufcx_tabulate_tensor_complex64* tabulate_tensor_complex64; ufcx_tabulate_tensor_complex128* tabulate_tensor_complex128; bool needs_facet_permutations; /// Get the coordinate element associated with the geometry of the mesh. 
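  /* Illustrative sketch (not part of the UFCx API): a quadrature_permutation
     entry N passed to the tabulate_tensor functions above decodes as

       int reflections = N % 2;  // 0 or 1 reflections of the facet data
       int rotations = N / 2;    // floor(N / 2) rotations of the facet data

     so e.g. N = 5 means two rotations followed by one reflection. */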
ufcx_finite_element* coordinate_element; } ufcx_integral; typedef struct ufcx_expression { /// Evaluate expression into tensor A with compiled evaluation points /// /// @param[out] A /// Dimensions: A[num_points][num_components][num_argument_dofs] /// /// @see ufcx_tabulate_tensor /// ufcx_tabulate_tensor_float32* tabulate_tensor_float32; ufcx_tabulate_tensor_float64* tabulate_tensor_float64; ufcx_tabulate_tensor_longdouble* tabulate_tensor_longdouble; ufcx_tabulate_tensor_complex64* tabulate_tensor_complex64; ufcx_tabulate_tensor_complex128* tabulate_tensor_complex128; /// Number of coefficients int num_coefficients; /// Number of constants int num_constants; /// Original coefficient position for each coefficient const int* original_coefficient_positions; /// List of names of coefficients const char** coefficient_names; /// List of names of constants const char** constant_names; /// Number of evaluation points int num_points; /// Dimension of evaluation point, i.e. topological dimension of /// reference cell int topological_dimension; /// Coordinates of evaluations points. Dimensions: /// points[num_points][topological_dimension] const double* points; /// Shape of expression. Dimension: value_shape[num_components] const int* value_shape; /// Number of components of return_shape int num_components; /// Rank, i.e. number of arguments int rank; /// Function spaces for all functions in the Expression. /// /// Function spaces for coefficients are followed by /// Arguments function spaces. /// Dimensions: function_spaces[num_coefficients + rank] ufcx_function_space** function_spaces; } ufcx_expression; /// This class defines the interface for the assembly of the global /// tensor corresponding to a form with r + n arguments, that is, a /// mapping /// /// a : V1 x V2 x ... Vr x W1 x W2 x ... x Wn -> R /// /// with arguments v1, v2, ..., vr, w1, w2, ..., wn. The rank r /// global tensor A is defined by /// /// A = a(V1, V2, ..., Vr, w1, w2, ..., wn), /// /// where each argument Vj represents the application to the /// sequence of basis functions of Vj and w1, w2, ..., wn are given /// fixed functions (coefficients). typedef struct ufcx_form { /// String identifying the form const char* signature; /// Rank of the global tensor (r) int rank; /// Number of coefficients (n) int num_coefficients; /// Number of constants int num_constants; /// Original coefficient position for each coefficient int* original_coefficient_position; /// Return list of names of coefficients const char** (*coefficient_name_map)(void); /// Return list of names of constants const char** (*constant_name_map)(void); /// Get a finite element for the i-th argument function, where 0 <= /// i < r + n. /// /// @param i Argument number if 0 <= i < r Coefficient number j = i /// - r if r + j <= i < r + n ufcx_finite_element** finite_elements; /// Get a dofmap for the i-th argument function, where 0 <= i < r + /// n. 
/// /// @param i /// Argument number if 0 <= i < r /// Coefficient number j=i-r if r+j <= i < r+n ufcx_dofmap** dofmaps; /// List of cell, interior facet and exterior facet integrals ufcx_integral** form_integrals; /// IDs for each integral in form_integrals list int* form_integral_ids; /// Offsets for cell, interior facet and exterior facet integrals in form_integrals list int* form_integral_offsets; } ufcx_form; // FIXME: Formalise a UFCX 'function space' typedef struct ufcx_function_space { ufcx_finite_element* finite_element; ufcx_dofmap* dofmap; /// The family of the finite element for the geometry map const char* geometry_family; /// The degree of the finite element for the geometry map int geometry_degree; /// The Basix cell of the finite element for the geometry map int geometry_basix_cell; /// The Basix variant of the finite element for the geometry map int geometry_basix_variant; } ufcx_function_space; #ifdef __cplusplus #undef restrict } #endif ffcx-0.7.0/ffcx/codegeneration/utils.py000066400000000000000000000017421450721277100201040ustar00rootroot00000000000000# Copyright (C) 2020-2023 Michal Habera and Chris Richardson # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later def cdtype_to_numpy(cdtype: str): """Map a C data type string NumPy datatype string.""" if cdtype == "double": return "float64" elif cdtype == "double _Complex": return "complex128" elif cdtype == "float": return "float32" elif cdtype == "float _Complex": return "complex64" elif cdtype == "long double": return "longdouble" else: raise RuntimeError(f"Unknown NumPy type for: {cdtype}") def scalar_to_value_type(scalar_type: str) -> str: """The C value type associated with a C scalar type. Args: scalar_type: A C type. Returns: The value type associated with ``scalar_type``. E.g., if ``scalar_type`` is ``float _Complex`` the return value is 'float'. """ return scalar_type.replace(' _Complex', '') ffcx-0.7.0/ffcx/compiler.py000066400000000000000000000065421450721277100155730ustar00rootroot00000000000000# Copyright (C) 2007-2020 Anders Logg and Michal Habera # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Main interface for compilation of forms. Breaks the compilation into several sequential stages. The output of each stage is the input of the next stage. Compiler stages --------------- 0. Language, parsing - Input: Python code or .ufl file - Output: UFL form This stage consists of parsing and expressing a form in the UFL form language. This stage is handled by UFL. 1. Analysis - Input: UFL form - Output: Preprocessed UFL form and FormData (metadata) This stage preprocesses the UFL form and extracts form metadata. It may also perform simplifications on the form. 2. Code representation - Input: Preprocessed UFL form and FormData (metadata) - Output: Intermediate Representation (IR) This stage examines the input and generates all data needed for code generation. This includes generation of finite element basis functions, extraction of data for mapping of degrees of freedom and possible precomputation of integrals. Most of the complexity of compilation is handled in this stage. The IR is stored as a dictionary, mapping names of UFC functions to data needed for generation of the corresponding code. 3. Code generation - Input: Intermediate Representation (IR) - Output: C code This stage examines the IR and generates the actual C code for the body of each UFC function. 
The code is stored as a dictionary, mapping names of UFC functions to
strings containing the C code of the body of each function.

4. Code formatting

   - Input:  C code
   - Output: C code files

This stage examines the generated C code and formats it according to
the UFC format, generating as output one or more .h/.c files conforming
to the UFC format.

"""

import logging
import typing
from time import time

from ffcx.analysis import analyze_ufl_objects
from ffcx.codegeneration.codegeneration import generate_code
from ffcx.formatting import format_code
from ffcx.ir.representation import compute_ir

logger = logging.getLogger("ffcx")


def _print_timing(stage: int, timing: float):
    logger.info(f"Compiler stage {stage} finished in {timing:.4f} seconds.")


def compile_ufl_objects(ufl_objects: typing.List[typing.Any],
                        object_names: typing.Dict = {},
                        prefix: typing.Optional[str] = None,
                        options: typing.Dict = {},
                        visualise: bool = False):
    """Generate UFC code for given UFL objects.

    Parameters
    ----------
    ufl_objects
        Objects to be compiled. Accepts elements, forms, integrals or
        coordinate mappings.

    """
    # Stage 1: analysis
    cpu_time = time()
    analysis = analyze_ufl_objects(ufl_objects, options)
    _print_timing(1, time() - cpu_time)

    # Stage 2: intermediate representation
    cpu_time = time()
    ir = compute_ir(analysis, object_names, prefix, options, visualise)
    _print_timing(2, time() - cpu_time)

    # Stage 3: code generation
    cpu_time = time()
    code = generate_code(ir, options)
    _print_timing(3, time() - cpu_time)

    # Stage 4: format code
    cpu_time = time()
    code_h, code_c = format_code(code, options)
    _print_timing(4, time() - cpu_time)

    return code_h, code_c
ffcx-0.7.0/ffcx/element_interface.py000066400000000000000000000066211450721277100174300ustar00rootroot00000000000000# Copyright (C) 2021 Matthew W. Scroggs and Chris Richardson
#
# This file is part of FFCx.(https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
"""Finite element interface."""

import typing
import warnings

import basix
import basix.ufl
import ufl
import numpy as np
import numpy.typing as npt


def convert_element(element: ufl.finiteelement.FiniteElementBase) -> basix.ufl._ElementBase:
    """Convert an element to an FFCx element."""
    if isinstance(element, basix.ufl._ElementBase):
        return element
    else:
        warnings.warn(
            "Use of elements created by UFL is deprecated.
You should create elements directly using Basix.", DeprecationWarning) return basix.ufl.convert_ufl_element(element) def basix_index(indices: typing.Tuple[int]) -> int: """Get the Basix index of a derivative.""" return basix.index(*indices) def create_quadrature( cellname: str, degree: int, rule: str, elements: typing.List[basix.ufl._ElementBase] ) -> typing.Tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]: """Create a quadrature rule.""" if cellname == "vertex": return (np.ones((1, 0), dtype=np.float64), np.ones(1, dtype=np.float64)) else: celltype = basix.cell.string_to_type(cellname) polyset_type = basix.PolysetType.standard for e in elements: polyset_type = basix.polyset_superset(celltype, polyset_type, e.polyset_type) return basix.make_quadrature( celltype, degree, rule=basix.quadrature.string_to_type(rule), polyset_type=polyset_type) def reference_cell_vertices(cellname: str) -> npt.NDArray[np.float64]: """Get the vertices of a reference cell.""" return basix.geometry(basix.cell.string_to_type(cellname)) def map_facet_points(points: npt.NDArray[np.float64], facet: int, cellname: str) -> npt.NDArray[np.float64]: """Map points from a reference facet to a physical facet.""" geom = basix.geometry(basix.cell.string_to_type(cellname)) facet_vertices = [geom[i] for i in basix.topology(basix.cell.string_to_type(cellname))[-2][facet]] return np.asarray([facet_vertices[0] + sum((i - facet_vertices[0]) * j for i, j in zip(facet_vertices[1:], p)) for p in points], dtype=np.float64) # TODO: remove this deprecated function def QuadratureElement( cellname: str, value_shape: typing.Tuple[int, ...], scheme: typing.Optional[str] = None, degree: typing.Optional[int] = None, points: typing.Optional[npt.NDArray[np.float64]] = None, weights: typing.Optional[npt.NDArray[np.float64]] = None, mapname: str = "identity" ) -> basix.ufl._ElementBase: warnings.warn( "ffcx.element_interface.QuadratureElement is deprecated and will be removed after December 2023. " "Use basix.ufl.quadrature_element instead.", DeprecationWarning) return basix.ufl.quadrature_element( cell=cellname, value_shape=value_shape, scheme=scheme, degree=degree, points=points, weights=weights, mapname=mapname) # TODO: remove this deprecated function def RealElement(element: ufl.finiteelement.FiniteElementBase) -> basix.ufl._ElementBase: warnings.warn( "ffcx.element_interface.RealElement is deprecated and will be removed after December 2023. " "Use basix.ufl.real_element instead.", DeprecationWarning) return basix.ufl.real_element(cell=element.cell().cellname(), value_shape=element.value_shape()) ffcx-0.7.0/ffcx/formatting.py000066400000000000000000000024311450721277100161240ustar00rootroot00000000000000# Copyright (C) 2009-2018 Anders Logg and Garth N. Wells # # This file is part of FFCx. (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Compiler stage 5: Code formatting. This module implements the formatting of UFC code from a given dictionary of generated C++ code for the body of each UFC function. It relies on templates for UFC code available as part of the module ufcx_utils. """ import logging import os logger = logging.getLogger("ffcx") def format_code(code, options: dict): """Format given code in UFC format. 
Returns two strings with header and source file contents.""" logger.info(79 * "*") logger.info("Compiler stage 5: Formatting code") logger.info(79 * "*") code_c = "" code_h = "" for parts_code in code: code_h += "".join([c[0] for c in parts_code]) code_c += "".join([c[1] for c in parts_code]) return code_h, code_c def write_code(code_h, code_c, prefix, output_dir): _write_file(code_h, prefix, ".h", output_dir) _write_file(code_c, prefix, ".c", output_dir) def _write_file(output, prefix, postfix, output_dir): """Write generated code to file.""" filename = os.path.join(output_dir, prefix + postfix) with open(filename, "w") as hfile: hfile.write(output) ffcx-0.7.0/ffcx/git_commit_hash.py.in000066400000000000000000000004341450721277100175160ustar00rootroot00000000000000# Copyright (C) 2016 Jan Blechta # # This file is part of FFCx. (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later def git_commit_hash(): """Return git changeset hash (returns "unknown" if changeset is not known).""" return "@GIT_COMMIT_HASH" ffcx-0.7.0/ffcx/ir/000077500000000000000000000000001450721277100140125ustar00rootroot00000000000000ffcx-0.7.0/ffcx/ir/__init__.py000066400000000000000000000000001450721277100161110ustar00rootroot00000000000000ffcx-0.7.0/ffcx/ir/analysis/000077500000000000000000000000001450721277100156355ustar00rootroot00000000000000ffcx-0.7.0/ffcx/ir/analysis/__init__.py000066400000000000000000000003411450721277100177440ustar00rootroot00000000000000# Copyright (C) 2011-2017 Martin Sandve Alnæs # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Algorithms for the analysis phase of the form compilation.""" ffcx-0.7.0/ffcx/ir/analysis/factorization.py000066400000000000000000000264441450721277100210750ustar00rootroot00000000000000# Copyright (C) 2011-2017 Martin Sandve Alnæs # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Algorithms for factorizing argument dependent monomials.""" import logging from functools import singledispatch from ffcx.ir.analysis.graph import ExpressionGraph from ffcx.ir.analysis.modified_terminals import (analyse_modified_terminal, strip_modified_terminal) from ufl import as_ufl, conditional from ufl.classes import (Argument, Conditional, Conj, Division, Product, Sum, Zero) logger = logging.getLogger("ffcx") def build_argument_indices(S): """Build ordered list of indices to modified arguments.""" arg_indices = [] for i, v in S.nodes.items(): arg = strip_modified_terminal(v['expression']) if isinstance(arg, Argument): arg_indices.append(i) # Make a canonical ordering of vertex indices for modified arguments def arg_ordering_key(i): """Return a key for sorting argument vertex indices. Key is based on the properties of the modified terminal. """ mt = analyse_modified_terminal(S.nodes[i]['expression']) return mt.argument_ordering_key() ordered_arg_indices = sorted(arg_indices, key=arg_ordering_key) return ordered_arg_indices def graph_insert(F, expr): """Add new expression expr to factorisation graph or return existing index.""" fi = F.e2i.get(expr) if fi is None: fi = F.number_of_nodes() F.add_node(fi, expression=expr) F.e2i[expr] = fi return fi # Reuse these empty objects where appropriate to save memory noargs = {} # type: ignore @singledispatch def handler(v, fac, sf, F): # Error checking if any(fac): raise RuntimeError( "Assuming that a {0} cannot be applied to arguments. If this is wrong please report a bug.". 
format(type(v))) # Record non-argument subexpression raise RuntimeError("No arguments") @handler.register(Sum) def handle_sum(v, fac, sf, F): if len(fac) != 2: raise RuntimeError("Assuming binary sum here. This can be fixed if needed.") fac0 = fac[0] fac1 = fac[1] argkeys = set(fac0) | set(fac1) if argkeys: # f*arg + g*arg = (f+g)*arg argkeys = sorted(argkeys) keylen = len(argkeys[0]) factors = {} for argkey in argkeys: if len(argkey) != keylen: raise RuntimeError("Expecting equal argument rank terms among summands.") fi0 = fac0.get(argkey) fi1 = fac1.get(argkey) if fi0 is None: fisum = fi1 elif fi1 is None: fisum = fi0 else: f0 = F.nodes[fi0]['expression'] f1 = F.nodes[fi1]['expression'] fisum = graph_insert(F, f0 + f1) factors[argkey] = fisum else: # non-arg + non-arg raise RuntimeError("No arguments") return factors @handler.register(Product) def handle_product(v, fac, sf, F): if len(fac) != 2: raise RuntimeError("Assuming binary product here. This can be fixed if needed.") fac0 = fac[0] fac1 = fac[1] if not fac0 and not fac1: # non-arg * non-arg raise RuntimeError("No arguments") elif not fac0: # non-arg * arg # Record products of non-arg operand with each factor of arg-dependent operand f0 = sf[0] factors = {} for k1 in sorted(fac1): f1 = F.nodes[fac1[k1]]['expression'] factors[k1] = graph_insert(F, f0 * f1) elif not fac1: # arg * non-arg # Record products of non-arg operand with each factor of arg-dependent operand f1 = sf[1] factors = {} for k0 in sorted(fac0): f0 = F.nodes[fac0[k0]]['expression'] factors[k0] = graph_insert(F, f1 * f0) else: # arg * arg # Record products of each factor of arg-dependent operand factors = {} for k0 in sorted(fac0): f0 = F.nodes[fac0[k0]]['expression'] for k1 in sorted(fac1): f1 = F.nodes[fac1[k1]]['expression'] argkey = tuple(sorted(k0 + k1)) # sort key for canonical representation factors[argkey] = graph_insert(F, f0 * f1) return factors @handler.register(Conj) def handle_conj(v, fac, sf, F): fac = fac[0] if fac: factors = {} for k in fac: f0 = F.nodes[fac[k]]['expression'] factors[k] = graph_insert(F, Conj(f0)) else: raise RuntimeError("No arguments") return factors @handler.register(Division) def handle_division(v, fac, sf, F): fac0 = fac[0] fac1 = fac[1] assert not fac1, "Cannot divide by arguments." if fac0: # arg / non-arg # Record products of non-arg operand with each factor of arg-dependent operand f1 = sf[1] factors = {} for k0 in sorted(fac0): f0 = F.nodes[fac0[k0]]['expression'] factors[k0] = graph_insert(F, f0 / f1) else: # non-arg / non-arg raise RuntimeError("No arguments") return factors @handler.register(Conditional) def handle_conditional(v, fac, sf, F): fac0 = fac[0] fac1 = fac[1] fac2 = fac[2] assert not fac0, "Cannot have argument in condition." if not (fac1 or fac2): # non-arg ? 
non-arg : non-arg raise RuntimeError("No arguments") else: f0 = sf[0] f1 = sf[1] f2 = sf[2] # Term conditional(c, argument, non-argument) is not legal unless non-argument is 0.0 assert fac1 or isinstance(f1, Zero) assert fac2 or isinstance(f2, Zero) assert () not in fac1 assert () not in fac2 z = as_ufl(0.0) # In general, can decompose like this: # conditional(c, sum_i fi*ui, sum_j fj*uj) -> sum_i conditional(c, fi, 0)*ui + sum_j conditional(c, 0, fj)*uj mas = sorted(set(fac1.keys()) | set(fac2.keys())) factors = {} for k in mas: fi1 = fac1.get(k) fi2 = fac2.get(k) f1 = z if fi1 is None else F.nodes[fi1]['expression'] f2 = z if fi2 is None else F.nodes[fi2]['expression'] factors[k] = graph_insert(F, conditional(f0, f1, f2)) return factors def compute_argument_factorization(S, rank): """Factorizes a scalar expression graph w.r.t. scalar Argument components. The result is a triplet (AV, FV, IM): - The scalar argument component subgraph: AV[ai] = v with the property SV[arg_indices] == AV[:] - An expression graph vertex list with all non-argument factors: FV[fi] = f with the property that none of the expressions depend on Arguments. - A dict representation of the final integrand of rank r: IM = { (ai1_1, ..., ai1_r): fi1, (ai2_1, ..., ai2_r): fi2, } This mapping represents the factorization of SV[-1] w.r.t. Arguments s.t.: SV[-1] := sum(FV[fik] * product(AV[ai] for ai in aik) for aik, fik in IM.items()) where := means equivalence in the mathematical sense, of course in a different technical representation. """ # Extract argument component subgraph arg_indices = build_argument_indices(S) AV = [S.nodes[i]['expression'] for i in arg_indices] # Data structure for building non-argument factors F = ExpressionGraph() # Attach a quick lookup dict for expression to index F.e2i = {} # Insert arguments as first entries in factorisation graph # They will not be connected to other nodes, but will be available # and referred to by the factorisation indices of the 'target' nodes. for v in AV: graph_insert(F, v) # Adding 1.0 as an expression allows avoiding special representation # of arguments when first visited by representing "v" as "1*v" one_index = graph_insert(F, as_ufl(1.0)) # Intermediate factorization for each vertex in SV on the format # SV_factors[si] = None # if SV[si] does not depend on arguments # SV_factors[si] = { argkey: fi } # if SV[si] does depend on arguments, where: # FV[fi] is the expression SV[si] with arguments factored out # argkey is a tuple with indices into SV for each of the argument components SV[si] depends on # SV_factors[si] = { argkey1: fi1, argkey2: fi2, ... } # if SV[si] # is a linear combination of multiple argkey configurations # Factorize each subexpression in order: for si, attr in S.nodes.items(): deps = S.out_edges[si] v = attr['expression'] if si in arg_indices: assert len(deps) == 0 # v is a modified Argument factors = {(si, ): one_index} else: fac = [S.nodes[d]['factors'] for d in deps] if not any(fac): # Entirely scalar (i.e. no arg factors) # Just add unchanged to F graph_insert(F, v) factors = noargs else: # Get scalar factors for dependencies # which do not have arg factors sf = [] for i, d in enumerate(deps): if fac[i]: sf.append(None) else: sf.append(S.nodes[d]['expression']) # Use appropriate handler to deal with Sum, Product, etc. 
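                # For example (u an argument): f*u + g*u factorises via
                # handle_sum into {(u,): f + g}, while (f*u1)*(g*u2)
                # factorises via handle_product into {(u1, u2): f*g}.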
factors = handler(v, fac, sf, F) attr['factors'] = factors assert len(F.nodes) == len(F.e2i) # Prepare a mapping from component of expression to factors factors = {} S_targets = [i for i, v in S.nodes.items() if v.get('target', False)] for S_target in S_targets: # Get the factorizations of the target values if S.nodes[S_target]['factors'] == {}: if rank == 0: # Functionals and expressions: store as no args * factor for comp in S.nodes[S_target]["component"]: factors[comp] = {(): F.e2i[S.nodes[S_target]['expression']]} else: # Zero form of arity 1 or higher: make factors empty pass else: # Forms of arity 1 or higher: # Map argkeys from indices into SV to indices into AV, # and resort keys for canonical representation for argkey, fi in S.nodes[S_target]['factors'].items(): ai_fi = {tuple(sorted(arg_indices.index(si) for si in argkey)): fi} for comp in S.nodes[S_target]["component"]: if factors.get(comp): factors[comp].update(ai_fi) else: factors[comp] = ai_fi # Indices into F that are needed for final result for comp, target in factors.items(): for argkey, fi in target.items(): F.nodes[fi]["target"] = F.nodes[fi].get("target", []) F.nodes[fi]["target"].append(argkey) F.nodes[fi]["component"] = F.nodes[fi].get("component", []) F.nodes[fi]["component"].append(comp) # Compute dependencies in FV for i, v in F.nodes.items(): expr = v['expression'] if not expr._ufl_is_terminal_ and not expr._ufl_is_terminal_modifier_: for o in expr.ufl_operands: F.add_edge(i, F.e2i[o]) return F ffcx-0.7.0/ffcx/ir/analysis/graph.py000066400000000000000000000200101450721277100173010ustar00rootroot00000000000000# Copyright (C) 2011-2017 Martin Sandve Alnæs # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Linearized data structure for the computational graph.""" import logging import numpy as np import ufl from ffcx.ir.analysis.modified_terminals import is_modified_terminal from ffcx.ir.analysis.reconstruct import reconstruct from ffcx.ir.analysis.valuenumbering import ValueNumberer logger = logging.getLogger("ffcx") class ExpressionGraph(object): """A directed multi-edge graph. ExpressionGraph allows multiple edges between the same nodes, and respects the insertion order of nodes and edges. 
""" def __init__(self): # Data structures for directed multi-edge graph self.nodes = {} self.out_edges = {} self.in_edges = {} def number_of_nodes(self): return len(self.nodes) def add_node(self, key, **kwargs): """Add a node with optional properties.""" self.nodes[key] = kwargs self.out_edges[key] = [] self.in_edges[key] = [] def add_edge(self, node1, node2): """Add a directed edge from node1 to node2.""" if node1 not in self.nodes or node2 not in self.nodes: raise KeyError("Adding edge to unknown node") self.out_edges[node1] += [node2] self.in_edges[node2] += [node1] def build_graph_vertices(expressions, skip_terminal_modifiers=False): # Count unique expression nodes G = ExpressionGraph() G.e2i = _count_nodes_with_unique_post_traversal(expressions, skip_terminal_modifiers) # Invert the map to get index->expression GV = sorted(G.e2i, key=G.e2i.get) # Add nodes to 'new' graph structure for i, v in enumerate(GV): G.add_node(i, expression=v) for comp, expr in enumerate(expressions): # Get vertex index representing input expression root V_target = G.e2i[expr] G.nodes[V_target]['target'] = True G.nodes[V_target]['component'] = G.nodes[V_target].get("component", []) G.nodes[V_target]['component'].append(comp) return G def build_scalar_graph(expression): """Build list representation of expression graph covering the given expressions.""" # Populate with vertices G = build_graph_vertices([expression], skip_terminal_modifiers=False) # Build more fine grained computational graph of scalar subexpressions scalar_expressions = rebuild_with_scalar_subexpressions(G) # Build new list representation of graph where all # vertices of V represent single scalar operations G = build_graph_vertices(scalar_expressions, skip_terminal_modifiers=True) # Compute graph edges V_deps = [] for i, v in G.nodes.items(): expr = v['expression'] if expr._ufl_is_terminal_ or expr._ufl_is_terminal_modifier_: V_deps.append(()) else: V_deps.append([G.e2i[o] for o in expr.ufl_operands]) for i, edges in enumerate(V_deps): for j in edges: if i == j: continue G.add_edge(i, j) return G def rebuild_with_scalar_subexpressions(G): """Build a new expression2index mapping where each subexpression is scalar valued. Input: - G.e2i - G.V - G.V_symbols - G.total_unique_symbols Output: - NV - Array with reverse mapping from index to expression - nvs - Tuple of ne2i indices corresponding to the last vertex of G.V """ # Compute symbols over graph and rebuild scalar expression # # New expression which represents usually an algebraic operation # generates a new symbol value_numberer = ValueNumberer(G) # V_symbols maps an index of a node to a list of # symbols which are present in that node V_symbols = value_numberer.compute_symbols() total_unique_symbols = value_numberer.symbol_count # Array to store the scalar subexpression in for each symbol W = np.empty(total_unique_symbols, dtype=object) # Iterate over each graph node in order for i, v in G.nodes.items(): expr = v['expression'] # Find symbols of v components vs = V_symbols[i] # Skip if there's nothing new here (should be the case for indexing types) # New symbols are not given to indexing types, so W[symbol] already equals # an expression, since it was assigned to the symbol in a previous loop # cycle if all(W[s] is not None for s in vs): continue if is_modified_terminal(expr): sh = expr.ufl_shape if sh: # Store each terminal expression component. We may not # actually need all of these later, but that will be # optimized away. # Note: symmetries will be dealt with in the value numbering. 
ws = [expr[c] for c in ufl.permutation.compute_indices(sh)] else: # Store single modified terminal expression component if len(vs) != 1: raise RuntimeError("Expecting single symbol for scalar valued modified terminal.") ws = [expr] # FIXME: Replace ws[:] with 0's if its table is empty # Possible redesign: loop over modified terminals only first, # then build tables for them, set W[s] = 0.0 for modified terminals with zero table, # then loop over non-(modified terminal)s to reconstruct expression. else: # Find symbols of operands sops = [] for j, vop in enumerate(expr.ufl_operands): if isinstance(vop, ufl.classes.MultiIndex): # TODO: Store MultiIndex in G.V and allocate a symbol to it for this to work if not isinstance(expr, ufl.classes.IndexSum): raise RuntimeError("Not expecting a %s." % type(expr)) sops.append(()) else: # TODO: Build edge datastructure and use instead? # k = G.E[i][j] k = G.e2i[vop] sops.append(V_symbols[k]) # Fetch reconstructed operand expressions wops = [tuple(W[k] for k in so) for so in sops] # Reconstruct scalar subexpressions of v ws = reconstruct(expr, wops) # Store all scalar subexpressions for v symbols if len(vs) != len(ws): raise RuntimeError("Expecting one symbol for each expression.") # Store each new scalar subexpression in W at the index of its symbol handled = set() for s, w in zip(vs, ws): if W[s] is None: W[s] = w handled.add(s) else: assert s in handled # Result of symmetry! - but I think this never gets reached anyway (CNR) # Find symbols of final v from input graph vs = V_symbols[-1] scalar_expressions = W[vs] return scalar_expressions def _count_nodes_with_unique_post_traversal(expressions, skip_terminal_modifiers=False): """Yield o for each node o in expr, child before parent. Never visits a node twice. """ def getops(e): """Get a modifiable list of operands of e, optionally treating modified terminals as a unit.""" # TODO: Maybe use e._ufl_is_terminal_modifier_ if e._ufl_is_terminal_ or (skip_terminal_modifiers and is_modified_terminal(e)): return [] else: return list(e.ufl_operands) e2i = {} stack = [(expr, getops(expr)) for expr in reversed(expressions)] while stack: expr, ops = stack[-1] if expr in e2i: stack.pop() continue for i, o in enumerate(ops): if o is not None and o not in e2i: stack.append((o, getops(o))) ops[i] = None break else: if not isinstance(expr, (ufl.classes.MultiIndex, ufl.classes.Label)): count = len(e2i) e2i[expr] = count stack.pop() return e2i ffcx-0.7.0/ffcx/ir/analysis/indexing.py000066400000000000000000000112731450721277100200200ustar00rootroot00000000000000# Copyright (C) 2011-2017 Martin Sandve Alnæs # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Algorithms for working with multiindices.""" import ufl from ufl.classes import ComponentTensor, FixedIndex, Index, Indexed from ufl.permutation import compute_indices from ufl.utils.indexflattening import flatten_multiindex, shape_to_strides def map_indexed_arg_components(indexed): """Build a map from flattened components to subexpression. Builds integer list mapping between flattened components of indexed expression and its underlying tensor-valued subexpression. 
""" assert isinstance(indexed, Indexed) # AKA indexed = tensor[multiindex] tensor, multiindex = indexed.ufl_operands # AKA e1 = e2[multiindex] # (this renaming is historical, but kept for consistency with all the variables *1,*2 below) e2 = tensor e1 = indexed # Get tensor and index shape sh1 = e1.ufl_shape sh2 = e2.ufl_shape fi1 = e1.ufl_free_indices fi2 = e2.ufl_free_indices fid1 = e1.ufl_index_dimensions fid2 = e2.ufl_index_dimensions # Compute regular and total shape tsh1 = sh1 + fid1 tsh2 = sh2 + fid2 # r1 = len(tsh1) r2 = len(tsh2) # str1 = shape_to_strides(tsh1) str2 = shape_to_strides(tsh2) assert not sh1 assert sh2 # Must have shape to be indexed in the first place assert ufl.product(tsh1) <= ufl.product(tsh2) # Build map from fi2/fid2 position (-offset nmui) to fi1/fid1 position ind2_to_ind1_map = [None] * len(fi2) for k, i in enumerate(fi2): ind2_to_ind1_map[k] = fi1.index(i) # Build map from fi1/fid1 position to mi position nmui = len(multiindex) multiindex_to_ind1_map = [None] * nmui for k, i in enumerate(multiindex): if isinstance(i, Index): multiindex_to_ind1_map[k] = fi1.index(i.count()) # Build map from flattened e1 component to flattened e2 component perm1 = compute_indices(tsh1) ni1 = ufl.product(tsh1) # Situation: e1 = e2[mi] d1 = [None] * ni1 p2 = [None] * r2 assert len(sh2) == nmui for k, i in enumerate(multiindex): if isinstance(i, FixedIndex): p2[k] = int(i) for c1, p1 in enumerate(perm1): for k, i in enumerate(multiindex): if isinstance(i, Index): p2[k] = p1[multiindex_to_ind1_map[k]] for k, i in enumerate(ind2_to_ind1_map): p2[nmui + k] = p1[i] c2 = flatten_multiindex(p2, str2) d1[c1] = c2 # Consistency checks assert all(isinstance(x, int) for x in d1) assert len(set(d1)) == len(d1) return d1 def map_component_tensor_arg_components(tensor): """Build a map from flattened components to subexpression. Builds integer list mapping between flattended components of tensor and its underlying indexed subexpression. """ assert isinstance(tensor, ComponentTensor) # AKA tensor = as_tensor(indexed, multiindex) indexed, multiindex = tensor.ufl_operands e1 = indexed e2 = tensor # e2 = as_tensor(e1, multiindex) mi = [i for i in multiindex if isinstance(i, Index)] # Get tensor and index shapes sh1 = e1.ufl_shape # (sh)ape of e1 sh2 = e2.ufl_shape # (sh)ape of e2 fi1 = e1.ufl_free_indices # (f)ree (i)ndices of e1 fi2 = e2.ufl_free_indices # ... fid1 = e1.ufl_index_dimensions # (f)ree (i)ndex (d)imensions of e1 fid2 = e2.ufl_index_dimensions # ... # Compute total shape (tsh) of e1 and e2 tsh1 = sh1 + fid1 tsh2 = sh2 + fid2 r1 = len(tsh1) # 'total rank' or e1 r2 = len(tsh2) # ... 
str1 = shape_to_strides(tsh1) assert not sh1 assert sh2 assert len(mi) == len(multiindex) assert ufl.product(tsh1) == ufl.product(tsh2) assert fi1 assert all(i in fi1 for i in fi2) nmui = len(multiindex) assert nmui == len(sh2) # Build map from fi2/fid2 position (-offset nmui) to fi1/fid1 position p2_to_p1_map = [None] * r2 for k, i in enumerate(fi2): p2_to_p1_map[k + nmui] = fi1.index(i) # Build map from fi1/fid1 position to mi position for k, i in enumerate(mi): p2_to_p1_map[k] = fi1.index(mi[k].count()) # Build map from flattened e1 component to flattened e2 component perm2 = compute_indices(tsh2) ni2 = ufl.product(tsh2) # Situation: e2 = as_tensor(e1, mi) d2 = [None] * ni2 p1 = [None] * r1 for c2, p2 in enumerate(perm2): for k2, k1 in enumerate(p2_to_p1_map): p1[k1] = p2[k2] c1 = flatten_multiindex(p1, str1) d2[c2] = c1 # Consistency checks assert all(isinstance(x, int) for x in d2) assert len(set(d2)) == len(d2) return d2 ffcx-0.7.0/ffcx/ir/analysis/modified_terminals.py000066400000000000000000000243241450721277100220520ustar00rootroot00000000000000# Copyright (C) 2011-2017 Martin Sandve Alnæs # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later import logging from ufl.classes import (Argument, CellAvg, FacetAvg, FixedIndex, FormArgument, Grad, Indexed, Jacobian, ReferenceGrad, ReferenceValue, Restricted, SpatialCoordinate) from ufl.permutation import build_component_numbering from ...element_interface import convert_element logger = logging.getLogger("ffcx") class ModifiedTerminal(object): """A modified terminal expression is an object of a Terminal subtype. It is wrapped in terminal modifier types. The variables of this class are: expr - The original UFL expression terminal - the underlying Terminal object global_derivatives - tuple of ints, each meaning derivative in that global direction local_derivatives - tuple of ints, each meaning derivative in that local direction reference_value - bool, whether this is represented in reference frame averaged - None, 'facet' or 'cell' restriction - None, '+' or '-' component - tuple of ints, the global component of the Terminal flat_component - single int, flattened local component of the Terminal, considering symmetry Possibly other component model: - global_component - reference_component - flat_component """ def __init__(self, expr, terminal, reference_value, base_shape, base_symmetry, component, flat_component, global_derivatives, local_derivatives, averaged, restriction): # The original expression self.expr = expr # The underlying terminal expression self.terminal = terminal # Are we seeing the terminal in physical or reference frame self.reference_value = reference_value # Get the shape of the core terminal or its reference value, # this is the shape that component and flat_component refers to self.base_shape = base_shape self.base_symmetry = base_symmetry # Components self.component = component self.flat_component = flat_component # Derivatives self.global_derivatives = global_derivatives self.local_derivatives = local_derivatives # Evaluation method (alternatives: { None, 'facet_midpoint', # 'cell_midpoint', 'facet_avg', 'cell_avg' }) self.averaged = averaged # Restriction to one cell or the other for interior facet integrals self.restriction = restriction def as_tuple(self): """Return a tuple with hashable values that uniquely identifies this modified terminal. 
Some of the derived variables can be omitted here as long as they are fully determined from the variables that are included here. """ t = self.terminal # FIXME: Terminal is not sortable... rv = self.reference_value # bs = self.base_shape # bsy = self.base_symmetry # c = self.component fc = self.flat_component gd = self.global_derivatives ld = self.local_derivatives a = self.averaged r = self.restriction return (t, rv, fc, gd, ld, a, r) def argument_ordering_key(self): """Return a key for deterministic sorting of argument vertex indices. The key is based on the properties of the modified terminal. Used in factorization but moved here for closeness with ModifiedTerminal attributes. """ t = self.terminal assert isinstance(t, Argument) n = t.number() assert n >= 0 p = t.part() rv = self.reference_value # bs = self.base_shape # bsy = self.base_symmetry # c = self.component fc = self.flat_component gd = self.global_derivatives ld = self.local_derivatives a = self.averaged r = self.restriction return (n, p, rv, fc, gd, ld, a, r) def __hash__(self): return hash(self.as_tuple()) def __eq__(self, other): return isinstance(other, ModifiedTerminal) and self.as_tuple() == other.as_tuple() # def __lt__(self, other): # error("Shouldn't use this?") # # FIXME: Terminal is not sortable, so the as_tuple contents # # must be changed for this to work properly # return self.as_tuple() < other.as_tuple() def __str__(self): return ( f"terminal: {self.terminal}\n" f"global_derivatives: {self.global_derivatives}\n" f"local_derivatives: {self.local_derivatives}\n" f"averaged: {self.averaged}\n" f"component: {self.component}\n" f"restriction: {self.restriction}") def is_modified_terminal(v): """Check if v is a terminal or a terminal wrapped in terminal modifier types.""" while not v._ufl_is_terminal_: if v._ufl_is_terminal_modifier_: v = v.ufl_operands[0] else: return False return True def strip_modified_terminal(v): """Extract core Terminal from a modified terminal or return None.""" while not v._ufl_is_terminal_: if v._ufl_is_terminal_modifier_: v = v.ufl_operands[0] else: return None return v def analyse_modified_terminal(expr): """Analyse a so-called 'modified terminal' expression. Return its properties in more compact form as a ModifiedTerminal object. A modified terminal expression is an object of a Terminal subtype, wrapped in terminal modifier types. The wrapper types can include 0-* Grad or ReferenceGrad objects, and 0-1 ReferenceValue, 0-1 Restricted, 0-1 Indexed, and 0-1 FacetAvg or CellAvg objects. 
""" # Data to determine component = None global_derivatives = [] local_derivatives = [] reference_value = None restriction = None averaged = None # Start with expr and strip away layers of modifiers t = expr while not t._ufl_is_terminal_: if isinstance(t, Indexed): if component is not None: raise RuntimeError("Got twice indexed terminal.") t, i = t.ufl_operands component = [int(j) for j in i] if not all(isinstance(j, FixedIndex) for j in i): raise RuntimeError("Expected only fixed indices.") elif isinstance(t, ReferenceValue): if reference_value is not None: raise RuntimeError("Got twice pulled back terminal!") t, = t.ufl_operands reference_value = True elif isinstance(t, ReferenceGrad): if not component: # covers None or () raise RuntimeError("Got local gradient of terminal without prior indexing.") t, = t.ufl_operands local_derivatives.append(component[-1]) component = component[:-1] elif isinstance(t, Grad): if not component: # covers None or () raise RuntimeError("Got local gradient of terminal without prior indexing.") t, = t.ufl_operands global_derivatives.append(component[-1]) component = component[:-1] elif isinstance(t, Restricted): if restriction is not None: raise RuntimeError("Got twice restricted terminal!") restriction = t._side t, = t.ufl_operands elif isinstance(t, CellAvg): if averaged is not None: raise RuntimeError("Got twice averaged terminal!") t, = t.ufl_operands averaged = "cell" elif isinstance(t, FacetAvg): if averaged is not None: raise RuntimeError("Got twice averaged terminal!") t, = t.ufl_operands averaged = "facet" elif t._ufl_terminal_modifiers_: raise RuntimeError("Missing handler for terminal modifier type {}, object is {}.".format( type(t), repr(t))) else: raise RuntimeError("Unexpected type %s object %s." % (type(t), repr(t))) # Make canonical representation of derivatives global_derivatives = tuple(sorted(global_derivatives)) local_derivatives = tuple(sorted(local_derivatives)) # Make reference_value true or false reference_value = reference_value or False # Consistency check if isinstance(t, (SpatialCoordinate, Jacobian)): pass else: if local_derivatives and not reference_value: raise RuntimeError("Local derivatives of non-local value is not legal.") if global_derivatives and reference_value: raise RuntimeError("Global derivatives of local value is not legal.") # Make sure component is an integer tuple if component is None: component = () else: component = tuple(component) # Get the shape of the core terminal or its reference value, this is # the shape that component refers to if isinstance(t, FormArgument): element = convert_element(t.ufl_function_space().ufl_element()) if reference_value: # Ignoring symmetry, assuming already applied in conversion # to reference frame base_symmetry = {} base_shape = element.reference_value_shape() else: base_symmetry = element.symmetry() base_shape = t.ufl_shape else: base_symmetry = {} base_shape = t.ufl_shape # Assert that component is within the shape of the (reference) # terminal if len(component) != len(base_shape): raise RuntimeError("Length of component does not match rank of (reference) terminal.") if not all(c >= 0 and c < d for c, d in zip(component, base_shape)): raise RuntimeError("Component indices %s are outside value shape %s" % (component, base_shape)) # Flatten component vi2si, _ = build_component_numbering(base_shape, base_symmetry) flat_component = vi2si[component] return ModifiedTerminal(expr, t, reference_value, base_shape, base_symmetry, component, flat_component, global_derivatives, 
local_derivatives, averaged, restriction) ffcx-0.7.0/ffcx/ir/analysis/reconstruct.py000066400000000000000000000142251450721277100205660ustar00rootroot00000000000000# Copyright (C) 2011-2017 Martin Sandve Alnæs and Chris Richardson # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later import ufl def handle_scalar_nary(o, ops): if o.ufl_shape != (): raise RuntimeError("Expecting scalar.") sops = [op[0] for op in ops] return [o._ufl_expr_reconstruct_(*sops)] def handle_condition(o, ops): # A condition is always scalar, so len(op) == 1 sops = [op[0] for op in ops] return [o._ufl_expr_reconstruct_(*sops)] def handle_conditional(o, ops): # A condition can be non scalar symbols = [] n = len(ops[1]) if len(ops[0]) != 1: raise RuntimeError("Condition should be scalar.") if n != len(ops[2]): raise RuntimeError("Conditional branches should have same shape.") for i in range(len(ops[1])): sops = (ops[0][0], ops[1][i], ops[2][i]) symbols.append(o._ufl_expr_reconstruct_(*sops)) return symbols def handle_elementwise_unary(o, ops): if len(ops) > 1: raise RuntimeError("Expecting unary operator.") return [o._ufl_expr_reconstruct_(op) for op in ops[0]] def handle_division(o, ops): if len(ops) != 2: raise RuntimeError("Expecting two operands.") if len(ops[1]) != 1: raise RuntimeError("Expecting scalar divisor.") b, = ops[1] return [o._ufl_expr_reconstruct_(a, b) for a in ops[0]] def handle_sum(o, ops): if len(ops) != 2: raise RuntimeError("Expecting two operands.") if len(ops[0]) != len(ops[1]): raise RuntimeError("Expecting scalar divisor.") return [o._ufl_expr_reconstruct_(a, b) for a, b in zip(ops[0], ops[1])] def handle_product(o, ops): if len(ops) != 2: raise RuntimeError("Expecting two operands.") # Get the simple cases out of the way if len(ops[0]) == 1: # True scalar * something a, = ops[0] return [ufl.classes.Product(a, b) for b in ops[1]] elif len(ops[1]) == 1: # Something * true scalar b, = ops[1] return [ufl.classes.Product(a, b) for a in ops[0]] # Neither of operands are true scalars, this is the tricky part o0, o1 = o.ufl_operands # Get shapes and index shapes fi = o.ufl_free_indices fi0 = o0.ufl_free_indices fi1 = o1.ufl_free_indices fid = o.ufl_index_dimensions fid0 = o0.ufl_index_dimensions fid1 = o1.ufl_index_dimensions # Need to map each return component to one component of o0 and # one component of o1 indices = ufl.permutation.compute_indices(fid) # Compute which component of o0 is used in component (comp,ind) of o # Compute strides within free index spaces ist0 = ufl.utils.indexflattening.shape_to_strides(fid0) ist1 = ufl.utils.indexflattening.shape_to_strides(fid1) # Map o0 and o1 indices to o indices indmap0 = [fi.index(i) for i in fi0] indmap1 = [fi.index(i) for i in fi1] indks = [(ufl.utils.indexflattening.flatten_multiindex([ind[i] for i in indmap0], ist0), ufl.utils.indexflattening.flatten_multiindex([ind[i] for i in indmap1], ist1)) for ind in indices] # Build products for scalar components results = [ufl.classes.Product(ops[0][k0], ops[1][k1]) for k0, k1 in indks] return results def handle_index_sum(o, ops): summand, mi = o.ufl_operands ic = mi[0].count() fi = summand.ufl_free_indices fid = summand.ufl_index_dimensions ipos = fi.index(ic) d = fid[ipos] # Compute "macro-dimensions" before and after i in the total shape of a predim = ufl.product(summand.ufl_shape) * ufl.product(fid[:ipos]) postdim = ufl.product(fid[ipos + 1:]) # Map each flattened total component of summand to # flattened total component of indexsum o by removing 
# axis corresponding to summation index ii. ss = ops[0] # Scalar subexpressions of summand if len(ss) != predim * postdim * d: raise RuntimeError("Mismatching number of subexpressions.") sops = [] for i in range(predim): iind = i * (postdim * d) for k in range(postdim): ind = iind + k sops.append([ss[ind + j * postdim] for j in range(d)]) # For each scalar output component, sum over collected subcomponents # TODO: Need to split this into binary additions to work with future CRSArray format, # i.e. emitting more expressions than there are symbols for this node. results = [sum(sop) for sop in sops] return results # TODO: To implement compound tensor operators such as dot and inner, # we need to identify which index to do the contractions over, # and build expressions such as sum(a*b for a,b in zip(aops, bops)) _reconstruct_call_lookup = {ufl.classes.MathFunction: handle_scalar_nary, ufl.classes.Abs: handle_scalar_nary, ufl.classes.MinValue: handle_scalar_nary, ufl.classes.MaxValue: handle_scalar_nary, ufl.classes.Real: handle_elementwise_unary, ufl.classes.Imag: handle_elementwise_unary, ufl.classes.Power: handle_scalar_nary, ufl.classes.BesselFunction: handle_scalar_nary, ufl.classes.Atan2: handle_scalar_nary, ufl.classes.Product: handle_product, ufl.classes.Division: handle_division, ufl.classes.Sum: handle_sum, ufl.classes.IndexSum: handle_index_sum, ufl.classes.Conj: handle_elementwise_unary, ufl.classes.Conditional: handle_conditional, ufl.classes.Condition: handle_condition} def reconstruct(o, *args): # First look for exact match f = _reconstruct_call_lookup.get(type(o), False) if f: return f(o, *args) else: # Look for parent class types instead for k in _reconstruct_call_lookup.keys(): if isinstance(o, k): return _reconstruct_call_lookup[k](o, *args) # Nothing found raise RuntimeError("Not expecting expression of type %s in here." % type(o)) ffcx-0.7.0/ffcx/ir/analysis/valuenumbering.py000066400000000000000000000210221450721277100212270ustar00rootroot00000000000000# Copyright (C) 2011-2017 Martin Sandve Alnæs # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Algorithms for value numbering within computational graphs.""" import logging import ufl from ffcx.ir.analysis.indexing import (map_component_tensor_arg_components, map_indexed_arg_components) from ffcx.ir.analysis.modified_terminals import analyse_modified_terminal logger = logging.getLogger("ffcx") class ValueNumberer(object): """Maps scalar components to unique values. An algorithm to map the scalar components of an expression node to unique value numbers, with fallthrough for types that can be mapped to the value numbers of their operands. 
""" def __init__(self, G): self.symbol_count = 0 self.G = G self.V_symbols = [] self.call_lookup = {ufl.classes.Expr: self.expr, ufl.classes.Argument: self.form_argument, ufl.classes.Coefficient: self.form_argument, ufl.classes.Grad: self._modified_terminal, ufl.classes.ReferenceGrad: self._modified_terminal, ufl.classes.FacetAvg: self._modified_terminal, ufl.classes.CellAvg: self._modified_terminal, ufl.classes.Restricted: self._modified_terminal, ufl.classes.ReferenceValue: self._modified_terminal, ufl.classes.Indexed: self.indexed, ufl.classes.ComponentTensor: self.component_tensor, ufl.classes.ListTensor: self.list_tensor, ufl.classes.Variable: self.variable} def new_symbols(self, n): """Generate new symbols with a running counter.""" begin = self.symbol_count end = begin + n self.symbol_count = end return list(range(begin, end)) def new_symbol(self): """Generate new symbol with a running counter.""" begin = self.symbol_count self.symbol_count += 1 return begin def get_node_symbols(self, expr): idx = [i for i, v in self.G.nodes.items() if v['expression'] == expr][0] return self.V_symbols[idx] def compute_symbols(self): for i, v in self.G.nodes.items(): expr = v['expression'] symbol = None # First look for exact type match f = self.call_lookup.get(type(expr), False) if f: symbol = f(expr) else: # Look for parent class types instead for k in self.call_lookup.keys(): if isinstance(expr, k): symbol = self.call_lookup[k](expr) break if symbol is None: # Nothing found raise RuntimeError("Not expecting type %s here." % type(expr)) self.V_symbols.append(symbol) return self.V_symbols def expr(self, v): """Create new symbols for expressions that represent new values.""" n = ufl.product(v.ufl_shape + v.ufl_index_dimensions) return self.new_symbols(n) def form_argument(self, v): """Create new symbols for expressions that represent new values.""" symmetry = v.ufl_function_space().ufl_element().symmetry() if symmetry: # Build symbols with symmetric components skipped symbols = [] mapped_symbols = {} for c in ufl.permutation.compute_indices(v.ufl_shape): # Build mapped component mc with symmetries from element considered mc = symmetry.get(c, c) # Get existing symbol or create new and store with mapped component mc as key s = mapped_symbols.get(mc) if s is None: s = self.new_symbol() mapped_symbols[mc] = s symbols.append(s) else: n = ufl.product(v.ufl_shape + v.ufl_index_dimensions) symbols = self.new_symbols(n) return symbols # Handle modified terminals with element symmetries and second derivative symmetries! # terminals are implemented separately, or maybe they don't need to be? def _modified_terminal(self, v): """Handle modified terminal. Modifiers: --------- terminal - the underlying Terminal object global_derivatives - tuple of ints, each meaning derivative in that global direction local_derivatives - tuple of ints, each meaning derivative in that local direction reference_value - bool, whether this is represented in reference frame averaged - None, 'facet' or 'cell' restriction - None, '+' or '-' component - tuple of ints, the global component of the Terminal flat_component - single int, flattened local component of the Terminal, considering symmetry """ # (1) mt.terminal.ufl_shape defines a core indexing space UNLESS mt.reference_value, # in which case the reference value shape of the element must be used. 
# (2) mt.terminal.ufl_element().symmetry() defines core symmetries # (3) averaging and restrictions define distinct symbols, no additional symmetries # (4) two or more grad/reference_grad defines distinct symbols with additional symmetries # v is not necessarily scalar here; indexing in (0,...,0) picks the first scalar component # to analyse, which should be sufficient to get the base shape and derivatives if v.ufl_shape: mt = analyse_modified_terminal(v[(0, ) * len(v.ufl_shape)]) else: mt = analyse_modified_terminal(v) # Get derivatives num_ld = len(mt.local_derivatives) num_gd = len(mt.global_derivatives) assert not (num_ld and num_gd) if num_ld: domain = ufl.domain.extract_unique_domain(mt.terminal) tdim = domain.topological_dimension() d_components = ufl.permutation.compute_indices((tdim, ) * num_ld) elif num_gd: domain = ufl.domain.extract_unique_domain(mt.terminal) gdim = domain.geometric_dimension() d_components = ufl.permutation.compute_indices((gdim, ) * num_gd) else: d_components = [()] # Get base shape without the derivative axes base_components = ufl.permutation.compute_indices(mt.base_shape) # Build symbols with symmetric components and derivatives skipped symbols = [] mapped_symbols = {} for bc in base_components: for dc in d_components: # Build mapped component mc with symmetries from element # and derivatives combined mbc = mt.base_symmetry.get(bc, bc) mdc = tuple(sorted(dc)) mc = mbc + mdc # Get existing symbol or create new and store with # mapped component mc as key s = mapped_symbols.get(mc) if s is None: s = self.new_symbol() mapped_symbols[mc] = s symbols.append(s) # Consistency check before returning symbols assert not v.ufl_free_indices if ufl.product(v.ufl_shape) != len(symbols): raise RuntimeError("Internal error in value numbering.") return symbols # indexed is implemented as a fall-through operation def indexed(self, Aii): # Reuse symbols of arg A for Aii A = Aii.ufl_operands[0] # Get symbols of argument A A_symbols = self.get_node_symbols(A) # Map A_symbols to Aii_symbols d = map_indexed_arg_components(Aii) symbols = [A_symbols[k] for k in d] return symbols def component_tensor(self, A): # Reuse symbols of arg Aii for A Aii = A.ufl_operands[0] # Get symbols of argument Aii Aii_symbols = self.get_node_symbols(Aii) # Map A_symbols to Aii_symbols d = map_component_tensor_arg_components(A) symbols = [Aii_symbols[k] for k in d] return symbols def list_tensor(self, v): symbols = [] for row in v.ufl_operands: symbols.extend(self.get_node_symbols(row)) return symbols def variable(self, v): """Direct reuse of all symbols.""" return self.get_node_symbols(v.ufl_operands[0]) ffcx-0.7.0/ffcx/ir/analysis/visualise.py000066400000000000000000000041321450721277100202130ustar00rootroot00000000000000# Copyright (C) 2018 Chris Richardson # # This file is part of FFCx. (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Utility to draw graphs.""" from ffcx.ir.analysis.modified_terminals import strip_modified_terminal from ufl.classes import (Argument, Division, FloatValue, Indexed, IntValue, Product, ReferenceValue, Sum) def visualise_graph(Gx, filename): try: import pygraphviz as pgv except ImportError: raise RuntimeError("Install pygraphviz") if Gx.number_of_nodes() > 400: print("Skipping visualisation") return G = pgv.AGraph(strict=False, directed=True) for nd, v in Gx.nodes.items(): ex = v['expression'] label = ex.__class__.__name__ if isinstance(ex, Sum): label = '+' elif isinstance(ex, Product): label = '*' elif isinstance(ex, Division): label = 
'/' elif isinstance(ex, (IntValue, FloatValue)): label = ex.value() elif isinstance(ex, (Indexed, ReferenceValue)): label = str(ex) G.add_node(nd, label='[%d] %s' % (nd, label)) arg = strip_modified_terminal(ex) if isinstance(arg, Argument): G.get_node(nd).attr['shape'] = 'box' stat = v.get('status') if stat == 'piecewise': G.get_node(nd).attr['color'] = 'blue' G.get_node(nd).attr['penwidth'] = 5 elif stat == 'varying': G.get_node(nd).attr['color'] = 'red' G.get_node(nd).attr['penwidth'] = 5 elif stat == 'inactive': G.get_node(nd).attr['color'] = 'dimgray' G.get_node(nd).attr['penwidth'] = 5 t = v.get('target') if t: G.get_node(nd).attr['label'] += ':' + str(t) G.get_node(nd).attr['shape'] = 'hexagon' c = v.get('component') if c: G.get_node(nd).attr['label'] += f", comp={c}" for nd, eds in Gx.out_edges.items(): for ed in eds: G.add_edge(nd, ed) G.layout(prog='dot') G.draw(filename) ffcx-0.7.0/ffcx/ir/elementtables.py000066400000000000000000000433221450721277100172140ustar00rootroot00000000000000# Copyright (C) 2013-2017 Martin Sandve Alnæs # # This file is part of FFCx. (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Tools for precomputed tables of terminal values.""" import logging import typing import numpy as np import numpy.typing as npt import ufl from ffcx.element_interface import (QuadratureElement, basix_index, convert_element) from ffcx.ir.representationutils import (create_quadrature_points_and_weights, integral_type_to_entity_dim, map_integral_points) logger = logging.getLogger("ffcx") # Using same defaults as np.allclose default_rtol = 1e-6 default_atol = 1e-9 piecewise_ttypes = ("piecewise", "fixed", "ones", "zeros") uniform_ttypes = ("fixed", "ones", "zeros", "uniform") class ModifiedTerminalElement(typing.NamedTuple): element: ufl.FiniteElementBase averaged: str local_derivatives: typing.Tuple[int, ...] fc: int class UniqueTableReferenceT(typing.NamedTuple): name: str values: npt.NDArray[np.float64] offset: int block_size: int ttype: str is_piecewise: bool is_uniform: bool is_permuted: bool def equal_tables(a, b, rtol=default_rtol, atol=default_atol): a = np.asarray(a) b = np.asarray(b) if a.shape != b.shape: return False else: return np.allclose(a, b, rtol=rtol, atol=atol) def clamp_table_small_numbers(table, rtol=default_rtol, atol=default_atol, numbers=(-1.0, 0.0, 1.0)): """Clamp almost 0,1,-1 values to integers. Returns new table.""" # Get shape of table and number of columns, defined as the last axis table = np.asarray(table) for n in numbers: table[np.where(np.isclose(table, n, rtol=rtol, atol=atol))] = n return table def get_ffcx_table_values(points, cell, integral_type, element, avg, entitytype, derivative_counts, flat_component): """Extract values from FFCx element table. 
Returns a dict with a 4D numpy array under 'array', with axes (permutation, entity number, quadrature point number, dof number), together with the component 'offset' and 'stride' within the element """ element = convert_element(element) deriv_order = sum(derivative_counts) if integral_type in ufl.custom_integral_types: # Use quadrature points on cell for analysis in custom integral types integral_type = "cell" assert not avg if integral_type == "expression": # FFCx tables for expression are generated as interior cell points integral_type = "cell" if avg in ("cell", "facet"): # Redefine points to compute average tables # Make sure this is not called with points; that doesn't make sense # assert points is None # Not expecting derivatives of averages assert not any(derivative_counts) assert deriv_order == 0 # Doesn't matter if it's exterior or interior facet integral, # just need a valid integral type to create quadrature rule if avg == "cell": integral_type = "cell" elif avg == "facet": integral_type = "exterior_facet" if isinstance(element, QuadratureElement): points = element._points weights = element._weights else: # Make quadrature rule and get points and weights points, weights = create_quadrature_points_and_weights( integral_type, cell, element.highest_degree(), "default", [element]) # Tabulate table of basis functions and derivatives in points for each entity tdim = cell.topological_dimension() entity_dim = integral_type_to_entity_dim(integral_type, tdim) num_entities = cell.num_sub_entities(entity_dim) # Extract arrays for the right scalar component component_tables = [] component_element, offset, stride = element.get_component_element(flat_component) for entity in range(num_entities): entity_points = map_integral_points(points, integral_type, cell, entity) tbl = component_element.tabulate(deriv_order, entity_points) tbl = tbl[basix_index(derivative_counts)] component_tables.append(tbl) if avg in ("cell", "facet"): # Compute numeric integral of each component table wsum = sum(weights) for entity, tbl in enumerate(component_tables): num_dofs = tbl.shape[1] tbl = np.dot(tbl, weights) / wsum tbl = np.reshape(tbl, (1, num_dofs)) component_tables[entity] = tbl # Loop over entities and fill table blockwise (each block = points x dofs) # Reorder axes as (points, dofs) instead of (dofs, points) assert len(component_tables) == num_entities num_points, num_dofs = component_tables[0].shape shape = (1, num_entities, num_points, num_dofs) res = np.zeros(shape) for entity in range(num_entities): res[:, entity, :, :] = component_tables[entity] return {'array': res, 'offset': offset, 'stride': stride} def generate_psi_table_name(quadrature_rule, element_counter, averaged: str, entitytype, derivative_counts, flat_component): """Generate a name for the psi table. Format: FE#_C#_D###[_AC|_AF|][_F|V][_Q#], where '#' will be an integer value. FE - is a simple counter to distinguish the various bases, it will be assigned in an arbitrary fashion. C - is the component number if any (this does not yet take into account tensor valued functions) D - is the number of derivatives in each spatial direction if any. If the element is defined in 3D, then D012 means d^3(*)/dydz^2.
AC - marks that the element values are averaged over the cell AF - marks that the element values are averaged over the facet F - marks that the first array dimension enumerates facets on the cell V - marks that the first array dimension enumerates vertices on the cell Q - unique ID of quadrature rule, to distinguish between tables in a mixed quadrature rule setting """ name = "FE%d" % element_counter if flat_component is not None: name += "_C%d" % flat_component if any(derivative_counts): name += "_D" + "".join(str(d) for d in derivative_counts) name += {None: "", "cell": "_AC", "facet": "_AF"}[averaged] name += {"cell": "", "facet": "_F", "vertex": "_V"}[entitytype] name += f"_Q{quadrature_rule.id()}" return name def get_modified_terminal_element(mt) -> typing.Optional[ModifiedTerminalElement]: gd = mt.global_derivatives ld = mt.local_derivatives domain = ufl.domain.extract_unique_domain(mt.terminal) # Extract element from FormArguments and relevant GeometricQuantities if isinstance(mt.terminal, ufl.classes.FormArgument): if gd and mt.reference_value: raise RuntimeError( "Global derivatives of reference values not defined.") elif ld and not mt.reference_value: raise RuntimeError( "Local derivatives of global values not defined.") element = convert_element(mt.terminal.ufl_function_space().ufl_element()) fc = mt.flat_component elif isinstance(mt.terminal, ufl.classes.SpatialCoordinate): if mt.reference_value: raise RuntimeError("Not expecting reference value of x.") if gd: raise RuntimeError("Not expecting global derivatives of x.") element = convert_element(domain.ufl_coordinate_element()) if not ld: fc = mt.flat_component else: # Actually the Jacobian expressed as reference_grad(x) fc = mt.flat_component # x-component assert len(mt.component) == 1 assert mt.component[0] == mt.flat_component elif isinstance(mt.terminal, ufl.classes.Jacobian): if mt.reference_value: raise RuntimeError("Not expecting reference value of J.") if gd: raise RuntimeError("Not expecting global derivatives of J.") element = convert_element(domain.ufl_coordinate_element()) assert len(mt.component) == 2 # Translate component J[i,d] to x element context rgrad(x[i])[d] fc, d = mt.component # x-component, derivative ld = tuple(sorted((d, ) + ld)) else: return None assert (mt.averaged is None) or not (ld or gd) # Change derivatives format for table lookup tdim = domain.topological_dimension() local_derivatives: typing.Tuple[int, ...] 
= tuple(ld.count(i) for i in range(tdim)) return ModifiedTerminalElement(element, mt.averaged, local_derivatives, fc) def permute_quadrature_interval(points, reflections=0): output = points.copy() for p in output: assert len(p) < 2 or np.isclose(p[1], 0) assert len(p) < 3 or np.isclose(p[2], 0) for i in range(reflections): for n, p in enumerate(output): output[n] = [1 - p[0]] return output def permute_quadrature_triangle(points, reflections=0, rotations=0): output = points.copy() for p in output: assert len(p) < 3 or np.isclose(p[2], 0) for i in range(rotations): for n, p in enumerate(output): output[n] = [p[1], 1 - p[0] - p[1]] for i in range(reflections): for n, p in enumerate(output): output[n] = [p[1], p[0]] return output def permute_quadrature_quadrilateral(points, reflections=0, rotations=0): output = points.copy() for p in output: assert len(p) < 3 or np.isclose(p[2], 0) for i in range(rotations): for n, p in enumerate(output): output[n] = [p[1], 1 - p[0]] for i in range(reflections): for n, p in enumerate(output): output[n] = [p[1], p[0]] return output def build_optimized_tables(quadrature_rule, cell, integral_type, entitytype, modified_terminals, existing_tables, rtol=default_rtol, atol=default_atol): """Build the element tables needed for a list of modified terminals. Input: entitytype - str modified_terminals - ordered sequence of unique modified terminals FIXME: Document Output: mt_tables - dict(ModifiedTerminal: table data) """ # Add to element tables analysis = {} for mt in modified_terminals: res = get_modified_terminal_element(mt) if res: analysis[mt] = res # Build element numbering using topological ordering so subelements # get priority all_elements = [res[0] for res in analysis.values()] unique_elements = ufl.algorithms.sort_elements( ufl.algorithms.analysis.extract_sub_elements(all_elements)) element_numbers = {element: i for i, element in enumerate(unique_elements)} mt_tables = {} _existing_tables = existing_tables.copy() for mt in modified_terminals: res = analysis.get(mt) if not res: continue element, avg, local_derivatives, flat_component = res # Generate table and store table name with modified terminal # Build name for this particular table element_number = element_numbers[element] name = generate_psi_table_name(quadrature_rule, element_number, avg, entitytype, local_derivatives, flat_component) # FIXME - currently just recalculate the tables every time, # only reusing them if they match numerically. # It should be possible to reuse the cached tables by name, but # the dofmap offset may differ due to restriction. 
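# (Illustrative sketch, not part of the original source: the permute_*
# helpers above enumerate quadrature-point orderings for each facet
# orientation. For example,
#     permute_quadrature_interval(np.array([[0.25], [0.75]]), reflections=1)
# maps each point x to 1 - x and returns [[0.75], [0.25]]. Below, one table
# is tabulated per orientation and the results are stacked along the leading
# permutation axis of the table array.)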
tdim = cell.topological_dimension() if integral_type == "interior_facet": if tdim == 1: t = get_ffcx_table_values(quadrature_rule.points, cell, integral_type, element, avg, entitytype, local_derivatives, flat_component) elif tdim == 2: new_table = [] for ref in range(2): new_table.append(get_ffcx_table_values( permute_quadrature_interval(quadrature_rule.points, ref), cell, integral_type, element, avg, entitytype, local_derivatives, flat_component)) t = new_table[0] t['array'] = np.vstack([td['array'] for td in new_table]) elif tdim == 3: cell_type = cell.cellname() if cell_type == "tetrahedron": new_table = [] for rot in range(3): for ref in range(2): new_table.append(get_ffcx_table_values( permute_quadrature_triangle( quadrature_rule.points, ref, rot), cell, integral_type, element, avg, entitytype, local_derivatives, flat_component)) t = new_table[0] t['array'] = np.vstack([td['array'] for td in new_table]) elif cell_type == "hexahedron": new_table = [] for rot in range(4): for ref in range(2): new_table.append(get_ffcx_table_values( permute_quadrature_quadrilateral( quadrature_rule.points, ref, rot), cell, integral_type, element, avg, entitytype, local_derivatives, flat_component)) t = new_table[0] t['array'] = np.vstack([td['array'] for td in new_table]) else: t = get_ffcx_table_values(quadrature_rule.points, cell, integral_type, element, avg, entitytype, local_derivatives, flat_component) # Clean up table tbl = clamp_table_small_numbers(t['array'], rtol=rtol, atol=atol) tabletype = analyse_table_type(tbl) if tabletype in piecewise_ttypes: # Reduce table to dimension 1 along num_points axis in generated code tbl = tbl[:, :, :1, :] if tabletype in uniform_ttypes: # Reduce table to dimension 1 along num_entities axis in generated code tbl = tbl[:, :1, :, :] is_permuted = is_permuted_table(tbl) if not is_permuted: # Reduce table along num_perms axis tbl = tbl[:1, :, :, :] # Check for existing identical table new_table = True for table_name in _existing_tables: if equal_tables(tbl, _existing_tables[table_name]): name = table_name tbl = _existing_tables[name] new_table = False break if new_table: _existing_tables[name] = tbl cell_offset = 0 element = convert_element(element) if mt.restriction == "-" and isinstance(mt.terminal, ufl.classes.FormArgument): # offset = 0 or number of element dofs, if restricted to "-" cell_offset = element.dim offset = cell_offset + t['offset'] block_size = t['stride'] # tables is just np.arrays, mt_tables hold metadata too mt_tables[mt] = UniqueTableReferenceT( name, tbl, offset, block_size, tabletype, tabletype in piecewise_ttypes, tabletype in uniform_ttypes, is_permuted) return mt_tables def is_zeros_table(table, rtol=default_rtol, atol=default_atol): return (np.prod(table.shape) == 0 or np.allclose(table, np.zeros(table.shape), rtol=rtol, atol=atol)) def is_ones_table(table, rtol=default_rtol, atol=default_atol): return np.allclose(table, np.ones(table.shape), rtol=rtol, atol=atol) def is_quadrature_table(table, rtol=default_rtol, atol=default_atol): _, num_entities, num_points, num_dofs = table.shape Id = np.eye(num_points) return (num_points == num_dofs and all( np.allclose(table[0, i, :, :], Id, rtol=rtol, atol=atol) for i in range(num_entities))) def is_permuted_table(table, rtol=default_rtol, atol=default_atol): return not all( np.allclose(table[0, :, :, :], table[i, :, :, :], rtol=rtol, atol=atol) for i in range(1, table.shape[0])) def is_piecewise_table(table, rtol=default_rtol, atol=default_atol): return all( np.allclose(table[0, :, 0, :], table[0, 
:, i, :], rtol=rtol, atol=atol) for i in range(1, table.shape[2])) def is_uniform_table(table, rtol=default_rtol, atol=default_atol): return all( np.allclose(table[0, 0, :, :], table[0, i, :, :], rtol=rtol, atol=atol) for i in range(1, table.shape[1])) def analyse_table_type(table, rtol=default_rtol, atol=default_atol): if is_zeros_table(table, rtol=rtol, atol=atol): # Table is empty or all values are 0.0 ttype = "zeros" elif is_ones_table(table, rtol=rtol, atol=atol): # All values are 1.0 ttype = "ones" elif is_quadrature_table(table, rtol=rtol, atol=atol): # Identity matrix mapping points to dofs (separately on each entity) ttype = "quadrature" else: # Equal for all points on a given entity piecewise = is_piecewise_table(table, rtol=rtol, atol=atol) uniform = is_uniform_table(table, rtol=rtol, atol=atol) if piecewise and uniform: # Constant for all points and all entities ttype = "fixed" elif piecewise: # Constant for all points on each entity separately ttype = "piecewise" elif uniform: # Equal on all entities ttype = "uniform" else: # Varying over points and entities ttype = "varying" return ttype ffcx-0.7.0/ffcx/ir/integral.py000066400000000000000000000324541450721277100162010ustar00rootroot00000000000000# Copyright (C) 2013-2020 Martin Sandve Alnæs and Michal Habera # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Main algorithm for building the integral intermediate representation.""" import collections import itertools import logging import typing import numpy as np import ufl from ffcx.ir.analysis.factorization import compute_argument_factorization from ffcx.ir.analysis.graph import build_scalar_graph from ffcx.ir.analysis.modified_terminals import (analyse_modified_terminal, is_modified_terminal) from ffcx.ir.analysis.visualise import visualise_graph from ffcx.ir.elementtables import UniqueTableReferenceT, build_optimized_tables from ufl.algorithms.balancing import balance_modifiers from ufl.checks import is_cellwise_constant from ufl.classes import QuadratureWeight logger = logging.getLogger("ffcx") class ModifiedArgumentDataT(typing.NamedTuple): ma_index: int tabledata: UniqueTableReferenceT class BlockDataT(typing.NamedTuple): ttypes: typing.Tuple[str, ...] # list of table types for each block rank factor_indices_comp_indices: typing.List[typing.Tuple[int, int]] # list of tuples (factor index, component index) all_factors_piecewise: bool # True if all factors for this block are piecewise unames: typing.Tuple[str, ...] # list of unique FE table names for each block rank restrictions: typing.Tuple[str, ...] # restriction "+" | "-" | None for each block rank transposed: bool # block is the transpose of another is_uniform: bool ma_data: typing.Tuple[ModifiedArgumentDataT, ...] # used in "full", "safe" and "partial" is_permuted: bool # Do quad points on facets need to be permuted? 
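# (Illustrative sketch, not part of the original FFCx source: BlockDataT and
# ModifiedArgumentDataT above are passive data carriers. The hypothetical
# helper below only demonstrates how a BlockDataT instance can be inspected;
# the field values in the docstring example are made up.)
def _describe_block(block: BlockDataT) -> str:
    """Summarise a block for debugging (hypothetical helper, not in FFCx).

    Example, with made-up field values:

    >>> block = BlockDataT(("varying", "varying"), [(7, 0)], False,
    ...                    ("FE0_Q1", "FE0_Q1"), (None, None), False,
    ...                    True, (), False)
    >>> _describe_block(block)
    'rank-2 block, tables FE0_Q1 x FE0_Q1, all factors piecewise: False'
    """
    rank = len(block.unames)  # one table name per argument (block rank)
    tables = " x ".join(block.unames)
    return f"rank-{rank} block, tables {tables}, all factors piecewise: {block.all_factors_piecewise}"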
def compute_integral_ir(cell, integral_type, entitytype, integrands, argument_shape, p, visualise): # The intermediate representation dict we're building and returning # here ir = {} # Pass on options for consumption in code generation ir["options"] = p # Shared unique tables for all quadrature loops ir["unique_tables"] = {} ir["unique_table_types"] = {} ir["integrand"] = {} for quadrature_rule, integrand in integrands.items(): expression = integrand # Rebalance order of nested terminal modifiers expression = balance_modifiers(expression) # Remove QuadratureWeight terminals from expression and replace with 1.0 expression = replace_quadratureweight(expression) # Build initial scalar list-based graph representation S = build_scalar_graph(expression) # Build terminal_data from V here before factorization. Then we # can use it to derive table properties for all modified # terminals, and then use that to rebuild the scalar graph more # efficiently before argument factorization. We can build # terminal_data again after factorization if that's necessary. initial_terminals = {i: analyse_modified_terminal(v['expression']) for i, v in S.nodes.items() if is_modified_terminal(v['expression'])} mt_table_reference = build_optimized_tables( quadrature_rule, cell, integral_type, entitytype, initial_terminals.values(), ir["unique_tables"], rtol=p["table_rtol"], atol=p["table_atol"]) # Fetch unique tables for this quadrature rule table_types = {v.name: v.ttype for v in mt_table_reference.values()} tables = {v.name: v.values for v in mt_table_reference.values()} S_targets = [i for i, v in S.nodes.items() if v.get('target', False)] num_components = np.int32(np.prod(expression.ufl_shape)) if 'zeros' in table_types.values(): # If there are any 'zero' tables, replace symbolically and rebuild graph for i, mt in initial_terminals.items(): # Set modified terminals with zero tables to zero tr = mt_table_reference.get(mt) if tr is not None and tr.ttype == "zeros": S.nodes[i]['expression'] = ufl.as_ufl(0.0) # Propagate expression changes using dependency list for i, v in S.nodes.items(): deps = [S.nodes[j]['expression'] for j in S.out_edges[i]] if deps: v['expression'] = v['expression']._ufl_expr_reconstruct_(*deps) # Recreate expression with correct ufl_shape expressions = [None, ] * num_components for target in S_targets: for comp in S.nodes[target]["component"]: assert expressions[comp] is None expressions[comp] = S.nodes[target]["expression"] expression = ufl.as_tensor(np.reshape(expressions, expression.ufl_shape)) # Rebuild scalar list-based graph representation S = build_scalar_graph(expression) # Output diagnostic graph as pdf if visualise: visualise_graph(S, 'S.pdf') # Compute factorization of arguments rank = len(argument_shape) F = compute_argument_factorization(S, rank) # Get the 'target' nodes that are factors of arguments, and insert in dict FV_targets = [i for i, v in F.nodes.items() if v.get('target', False)] argument_factorization = {} for fi in FV_targets: # Number of blocks using this factor must agree with number of components # to which this factor contributes. I.e. 
there are more blocks iff there are more # components assert len(F.nodes[fi]['target']) == len(F.nodes[fi]['component']) k = 0 for w in F.nodes[fi]['target']: comp = F.nodes[fi]['component'][k] argument_factorization[w] = argument_factorization.get(w, []) # Store tuple of (factor index, component index) argument_factorization[w].append((fi, comp)) k += 1 # Get list of indices in F which are the arguments (should be at start) argkeys = set() for w in argument_factorization: argkeys = argkeys | set(w) argkeys = list(argkeys) # Build set of modified_terminals for each mt factorized vertex in F # and attach tables, if appropriate for i, v in F.nodes.items(): expr = v['expression'] if is_modified_terminal(expr): mt = analyse_modified_terminal(expr) F.nodes[i]['mt'] = mt tr = mt_table_reference.get(mt) if tr is not None: F.nodes[i]['tr'] = tr # Attach 'status' to each node: 'inactive', 'piecewise' or 'varying' analyse_dependencies(F, mt_table_reference) # Output diagnostic graph as pdf if visualise: visualise_graph(F, 'F.pdf') # Loop over factorization terms block_contributions = collections.defaultdict(list) for ma_indices, fi_ci in sorted(argument_factorization.items()): # Get a bunch of information about this term assert rank == len(ma_indices) trs = tuple(F.nodes[ai]['tr'] for ai in ma_indices) unames = tuple(tr.name for tr in trs) ttypes = tuple(tr.ttype for tr in trs) assert not any(tt == "zeros" for tt in ttypes) blockmap = [] for tr in trs: begin = tr.offset num_dofs = tr.values.shape[3] dofmap = tuple(begin + i * tr.block_size for i in range(num_dofs)) blockmap.append(dofmap) blockmap = tuple(blockmap) block_is_uniform = all(tr.is_uniform for tr in trs) # Collect relevant restrictions to identify blocks correctly # in interior facet integrals block_restrictions = [] for i, ai in enumerate(ma_indices): if trs[i].is_uniform: r = None else: r = F.nodes[ai]['mt'].restriction block_restrictions.append(r) block_restrictions = tuple(block_restrictions) # Check if each *each* factor corresponding to this argument is piecewise all_factors_piecewise = all(F.nodes[ifi[0]]["status"] == 'piecewise' for ifi in fi_ci) block_is_permuted = False for name in unames: if tables[name].shape[0] > 1: block_is_permuted = True ma_data = [] for i, ma in enumerate(ma_indices): ma_data.append(ModifiedArgumentDataT(ma, trs[i])) block_is_transposed = False # FIXME: Handle transposes for these block types block_unames = unames blockdata = BlockDataT(ttypes, fi_ci, all_factors_piecewise, block_unames, block_restrictions, block_is_transposed, block_is_uniform, tuple(ma_data), block_is_permuted) # Insert in expr_ir for this quadrature loop block_contributions[blockmap].append(blockdata) # Figure out which table names are referenced active_table_names = set() for i, v in F.nodes.items(): tr = v.get('tr') if tr is not None and F.nodes[i]['status'] != 'inactive': active_table_names.add(tr.name) # Figure out which table names are referenced in blocks for blockmap, contributions in itertools.chain( block_contributions.items()): for blockdata in contributions: for mad in blockdata.ma_data: active_table_names.add(mad.tabledata.name) active_tables = {} active_table_types = {} for name in active_table_names: # Drop tables not referenced from modified terminals if table_types[name] not in ("zeros", "ones"): active_tables[name] = tables[name] active_table_types[name] = table_types[name] # Add tables and types for this quadrature rule to global tables dict ir["unique_tables"].update(active_tables) 
ir["unique_table_types"].update(active_table_types) # Build IR dict for the given expressions # Store final ir for this num_points ir["integrand"][quadrature_rule] = {"factorization": F, "modified_arguments": [F.nodes[i]['mt'] for i in argkeys], "block_contributions": block_contributions} restrictions = [i.restriction for i in initial_terminals.values()] ir["needs_facet_permutations"] = "+" in restrictions and "-" in restrictions return ir def analyse_dependencies(F, mt_unique_table_reference): # Sets 'status' of all nodes to either: 'inactive', 'piecewise' or 'varying' # Children of 'target' nodes are either 'piecewise' or 'varying'. # All other nodes are 'inactive'. # Varying nodes are identified by their tables ('tr'). All their parent # nodes are also set to 'varying' - any remaining active nodes are 'piecewise'. # Set targets, and dependencies to 'active' targets = [i for i, v in F.nodes.items() if v.get('target')] for i, v in F.nodes.items(): v['status'] = 'inactive' while targets: s = targets.pop() F.nodes[s]['status'] = 'active' for j in F.out_edges[s]: if F.nodes[j]['status'] == 'inactive': targets.append(j) # Build piecewise/varying markers for factorized_vertices varying_ttypes = ("varying", "quadrature", "uniform") varying_indices = [] for i, v in F.nodes.items(): if v.get('mt') is None: continue tr = v.get('tr') if tr is not None: ttype = tr.ttype # Check if table computations have revealed values varying over points if ttype in varying_ttypes: varying_indices.append(i) else: if ttype not in ("fixed", "piecewise", "ones", "zeros"): raise RuntimeError("Invalid ttype %s" % (ttype, )) elif not is_cellwise_constant(v['expression']): raise RuntimeError("Error " + str(tr)) # Keeping this check to be on the safe side, # not sure which cases this will cover (if any) # varying_indices.append(i) # Set all parents of active varying nodes to 'varying' while varying_indices: s = varying_indices.pop() if F.nodes[s]['status'] == 'active': F.nodes[s]['status'] = 'varying' for j in F.in_edges[s]: varying_indices.append(j) # Any remaining active nodes must be 'piecewise' for i, v in F.nodes.items(): if v['status'] == 'active': v['status'] = 'piecewise' def replace_quadratureweight(expression): """Remove any QuadratureWeight terminals and replace with 1.0.""" r = [] for node in ufl.corealg.traversal.unique_pre_traversal(expression): if is_modified_terminal(node) and isinstance(node, QuadratureWeight): r.append(node) replace_map = {q: 1.0 for q in r} return ufl.algorithms.replace(expression, replace_map) ffcx-0.7.0/ffcx/ir/representation.py000066400000000000000000000674561450721277100174500ustar00rootroot00000000000000# Copyright (C) 2009-2020 Anders Logg, Martin Sandve Alnæs, Marie E. Rognes, # Kristian B. Oelgaard, Matthew W. Scroggs, Chris Richardson, and others # # This file is part of FFCx. (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Compiler stage 2: Code representation. Module computes intermediate representations of forms, elements and dofmaps. For each UFC function, we extract the data needed for code generation at a later stage. The representation should conform strictly to the naming and order of functions in UFC. Thus, for code generation of the function "foo", one should only need to use the data stored in the intermediate representation under the key "foo". 
""" import itertools import logging import typing import warnings import numpy as np import numpy.typing as npt import basix import basix.ufl import ufl from ffcx import naming from ffcx.analysis import UFLData from ffcx.element_interface import convert_element from ffcx.ir.integral import compute_integral_ir from ffcx.ir.representationutils import (QuadratureRule, create_quadrature_points_and_weights) from ufl.classes import Integral from ufl.sorting import sorted_expr_sum logger = logging.getLogger("ffcx") class FormIR(typing.NamedTuple): id: int name: str signature: str rank: int num_coefficients: int num_constants: int name_from_uflfile: str function_spaces: typing.Dict[str, typing.Tuple[str, str, str, int, basix.CellType, basix.LagrangeVariant]] original_coefficient_position: typing.List[int] coefficient_names: typing.List[str] constant_names: typing.List[str] finite_elements: typing.List[str] dofmaps: typing.List[str] integral_names: typing.Dict[str, typing.List[str]] subdomain_ids: typing.Dict[str, typing.List[int]] class CustomElementIR(typing.NamedTuple): cell_type: basix.CellType value_shape: typing.Tuple[int, ...] wcoeffs: npt.NDArray[np.float64] x: typing.List[typing.List[npt.NDArray[np.float64]]] M: typing.List[typing.List[npt.NDArray[np.float64]]] map_type: basix.MapType sobolev_space: basix.SobolevSpace interpolation_nderivs: int discontinuous: bool highest_complete_degree: int highest_degree: int polyset_type: basix.PolysetType class ElementIR(typing.NamedTuple): id: int name: str signature: str cell_shape: str topological_dimension: int geometric_dimension: int space_dimension: int value_shape: typing.Tuple[int, ...] reference_value_shape: typing.Tuple[int, ...] degree: int family: str num_sub_elements: int block_size: int sub_elements: typing.List[str] element_type: str entity_dofs: typing.List[typing.List[typing.List[int]]] lagrange_variant: basix.LagrangeVariant dpc_variant: basix.DPCVariant basix_family: basix.ElementFamily basix_cell: basix.CellType discontinuous: bool custom_element: CustomElementIR class DofMapIR(typing.NamedTuple): id: int name: str signature: str num_global_support_dofs: int num_element_support_dofs: int entity_dofs: typing.List[typing.List[typing.List[int]]] num_entity_dofs: typing.List[typing.List[int]] entity_closure_dofs: typing.List[typing.List[typing.List[int]]] num_entity_closure_dofs: typing.List[typing.List[int]] num_sub_dofmaps: int sub_dofmaps: typing.List[str] block_size: int class IntegralIR(typing.NamedTuple): integral_type: str subdomain_id: typing.Union[str, typing.Tuple[int, ...], int] rank: int geometric_dimension: int topological_dimension: int entitytype: str num_facets: int num_vertices: int enabled_coefficients: typing.List[bool] element_dimensions: typing.Dict[ufl.FiniteElementBase, int] element_ids: typing.Dict[ufl.FiniteElementBase, int] tensor_shape: typing.List[int] coefficient_numbering: typing.Dict[ufl.Coefficient, int] coefficient_offsets: typing.Dict[ufl.Coefficient, int] original_constant_offsets: typing.Dict[ufl.Constant, int] options: dict cell_shape: str unique_tables: typing.Dict[str, npt.NDArray[np.float64]] unique_table_types: typing.Dict[str, str] integrand: typing.Dict[QuadratureRule, dict] name: str needs_facet_permutations: bool coordinate_element: str class ExpressionIR(typing.NamedTuple): name: str element_dimensions: typing.Dict[ufl.FiniteElementBase, int] options: dict unique_tables: typing.Dict[str, npt.NDArray[np.float64]] unique_table_types: typing.Dict[str, str] integrand: 
typing.Dict[QuadratureRule, dict] coefficient_numbering: typing.Dict[ufl.Coefficient, int] coefficient_offsets: typing.Dict[ufl.Coefficient, int] integral_type: str entitytype: str tensor_shape: typing.List[int] expression_shape: typing.List[int] original_constant_offsets: typing.Dict[ufl.Constant, int] points: npt.NDArray[np.float64] coefficient_names: typing.List[str] constant_names: typing.List[str] needs_facet_permutations: bool function_spaces: typing.Dict[str, typing.Tuple[str, str, str, int, basix.CellType, basix.LagrangeVariant]] name_from_uflfile: str original_coefficient_positions: typing.List[int] class DataIR(typing.NamedTuple): elements: typing.List[ElementIR] dofmaps: typing.List[DofMapIR] integrals: typing.List[IntegralIR] forms: typing.List[FormIR] expressions: typing.List[ExpressionIR] def compute_ir(analysis: UFLData, object_names, prefix, options, visualise): """Compute intermediate representation.""" logger.info(79 * "*") logger.info("Compiler stage 2: Computing intermediate representation of objects") logger.info(79 * "*") # Compute object names # NOTE: This is done here for performance reasons, because repeated calls # within each IR computation would be expensive due to UFL signature computations finite_element_names = {e: naming.finite_element_name(e, prefix) for e in analysis.unique_elements} dofmap_names = {e: naming.dofmap_name(e, prefix) for e in analysis.unique_elements} integral_names = {} form_names = {} for fd_index, fd in enumerate(analysis.form_data): form_names[fd_index] = naming.form_name(fd.original_form, fd_index, prefix) for itg_index, itg_data in enumerate(fd.integral_data): integral_names[(fd_index, itg_index)] = naming.integral_name(fd.original_form, itg_data.integral_type, fd_index, itg_data.subdomain_id, prefix) ir_elements = [_compute_element_ir(e, analysis.element_numbers, finite_element_names) for e in analysis.unique_elements] ir_dofmaps = [_compute_dofmap_ir(e, analysis.element_numbers, dofmap_names) for e in analysis.unique_elements] irs = [_compute_integral_ir(fd, i, analysis.element_numbers, integral_names, finite_element_names, options, visualise) for (i, fd) in enumerate(analysis.form_data)] ir_integrals = list(itertools.chain(*irs)) ir_forms = [_compute_form_ir(fd, i, prefix, form_names, integral_names, analysis.element_numbers, finite_element_names, dofmap_names, object_names) for (i, fd) in enumerate(analysis.form_data)] ir_expressions = [_compute_expression_ir(expr, i, prefix, analysis, options, visualise, object_names, finite_element_names, dofmap_names) for i, expr in enumerate(analysis.expressions)] return DataIR(elements=ir_elements, dofmaps=ir_dofmaps, integrals=ir_integrals, forms=ir_forms, expressions=ir_expressions) def _compute_element_ir(element, element_numbers, finite_element_names): """Compute intermediate representation of element.""" logger.info(f"Computing IR for element {element}") element = convert_element(element) # Create basix elements cell = element.cell() # Store id ir = {"id": element_numbers[element]} ir["name"] = finite_element_names[element] # Compute data for each function ir["signature"] = repr(element) ir["cell_shape"] = element.cell_type.name ir["topological_dimension"] = cell.topological_dimension() ir["geometric_dimension"] = cell.geometric_dimension() ir["space_dimension"] = element.dim + element.num_global_support_dofs ir["element_type"] = element.ufcx_element_type ir["lagrange_variant"] = element.lagrange_variant ir["dpc_variant"] = element.dpc_variant ir["basix_family"] = element.element_family 
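# NOTE (illustrative addition, not part of the original FFCx source): for a
# degree-2 Lagrange element on a triangle, the data gathered here includes
# space_dimension == 6 (one DOF per vertex and one per edge). A sketch of how
# the underlying data can be inspected with Basix directly (assuming
# Basix 0.7):
#
#     import basix
#     e = basix.create_element(basix.ElementFamily.P, basix.CellType.triangle,
#                              2, basix.LagrangeVariant.equispaced)
#     e.dim          # 6
#     e.entity_dofs  # [[[0], [1], [2]], [[3], [4], [5]], [[]]]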
ir["basix_cell"] = element.cell_type ir["discontinuous"] = element.discontinuous ir["degree"] = element.degree() ir["family"] = element.family_name ir["value_shape"] = element.value_shape() ir["reference_value_shape"] = element.reference_value_shape() ir["num_sub_elements"] = element.num_sub_elements() ir["sub_elements"] = [finite_element_names[e] for e in element.sub_elements()] ir["block_size"] = element.block_size if element.block_size > 1: element = element.sub_element ir["entity_dofs"] = element.entity_dofs if element.is_custom_element: ir["custom_element"] = _compute_custom_element_ir(element.element) else: ir["custom_element"] = None return ElementIR(**ir) def _compute_custom_element_ir(basix_element: basix.finite_element.FiniteElement): """Compute intermediate representation of a custom Basix element.""" ir: typing.Dict[str, typing.Any] = {} ir["cell_type"] = basix_element.cell_type ir["value_shape"] = basix_element.value_shape ir["wcoeffs"] = basix_element.wcoeffs ir["x"] = basix_element.x ir["M"] = basix_element.M ir["map_type"] = basix_element.map_type ir["sobolev_space"] = basix_element.sobolev_space ir["discontinuous"] = basix_element.discontinuous ir["interpolation_nderivs"] = basix_element.interpolation_nderivs ir["highest_complete_degree"] = basix_element.highest_complete_degree ir["highest_degree"] = basix_element.highest_degree ir["polyset_type"] = basix_element.polyset_type return CustomElementIR(**ir) def _compute_dofmap_ir(element, element_numbers, dofmap_names): """Compute intermediate representation of dofmap.""" logger.info(f"Computing IR for dofmap of {element}") # Create basix elements element = convert_element(element) # Store id ir = {"id": element_numbers[element]} ir["name"] = dofmap_names[element] # Compute data for each function ir["signature"] = "FFCx dofmap for " + repr(element) ir["sub_dofmaps"] = [dofmap_names[e] for e in element.sub_elements()] ir["num_sub_dofmaps"] = element.num_sub_elements() ir["block_size"] = element.block_size if element.block_size > 1: element = element.sub_element # Precompute repeatedly used items for i in element.num_entity_dofs: # FIXME: this assumes the same number of DOFs on each entity of the same dim: this # assumption will not be true for prisms and pyramids if max(i) != min(i): raise RuntimeError("Elements with different numbers of DOFs on subentities of the same dimension" " are not yet supported in FFCx.") # FIXME: This does not work for prisms and pyramids num_dofs_per_entity = [i[0] for i in element.num_entity_dofs] ir["num_entity_dofs"] = num_dofs_per_entity ir["entity_dofs"] = element.entity_dofs num_dofs_per_entity_closure = [i[0] for i in element.num_entity_closure_dofs] ir["num_entity_closure_dofs"] = num_dofs_per_entity_closure ir["entity_closure_dofs"] = element.entity_closure_dofs ir["num_global_support_dofs"] = element.num_global_support_dofs ir["num_element_support_dofs"] = element.dim return DofMapIR(**ir) def _compute_integral_ir(form_data, form_index, element_numbers, integral_names, finite_element_names, options, visualise): """Compute intermediate representation for form integrals.""" _entity_types = { "cell": "cell", "exterior_facet": "facet", "interior_facet": "facet", "vertex": "vertex", "custom": "cell" } # Iterate over groups of integrals irs = [] for itg_data_index, itg_data in enumerate(form_data.integral_data): logger.info(f"Computing IR for integral in integral group {itg_data_index}") # Compute representation entitytype = _entity_types[itg_data.integral_type] cell = itg_data.domain.ufl_cell() 
cellname = cell.cellname() tdim = cell.topological_dimension() assert all(tdim == itg.ufl_domain().topological_dimension() for itg in itg_data.integrals) ir = { "integral_type": itg_data.integral_type, "subdomain_id": itg_data.subdomain_id, "rank": form_data.rank, "geometric_dimension": form_data.geometric_dimension, "topological_dimension": tdim, "entitytype": entitytype, "num_facets": cell.num_facets(), "num_vertices": cell.num_vertices(), "enabled_coefficients": itg_data.enabled_coefficients, "cell_shape": cellname, "coordinate_element": finite_element_names[convert_element(itg_data.domain.ufl_coordinate_element())] } # Get element space dimensions unique_elements = element_numbers.keys() ir["element_dimensions"] = {element: element.dim + element.num_global_support_dofs for element in unique_elements} ir["element_ids"] = { element: i for i, element in enumerate(unique_elements) } # Create dimensions of primary indices, needed to reset the argument # 'A' given to tabulate_tensor() by the assembler. argument_dimensions = [ ir["element_dimensions"][convert_element(element)] for element in form_data.argument_elements ] # Compute shape of element tensor if ir["integral_type"] == "interior_facet": ir["tensor_shape"] = [2 * dim for dim in argument_dimensions] else: ir["tensor_shape"] = argument_dimensions integral_type = itg_data.integral_type cell = itg_data.domain.ufl_cell() # Group integrands with the same quadrature rule grouped_integrands = {} for integral in itg_data.integrals: md = integral.metadata() or {} scheme = md["quadrature_rule"] if scheme == "custom": points = md["quadrature_points"] weights = md["quadrature_weights"] elif scheme == "vertex": # FIXME: Could this come from basix? # The vertex scheme, i.e., averaging the function value in the # vertices and multiplying with the simplex volume, is only of # order 1 and inferior to other generic schemes in terms of # error reduction. Equation systems generated with the vertex # scheme have some properties that other schemes lack, e.g., the # mass matrix is a simple diagonal matrix. This may be # prescribed in certain cases. degree = md["quadrature_degree"] if integral_type != "cell": facet_types = cell.facet_types() assert len(facet_types) == 1 cellname = facet_types[0].cellname() if degree > 1: warnings.warn("Explicitly selected vertex quadrature (degree 1), but requested degree is {}.". format(degree)) if cellname == "tetrahedron": points, weights = (np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]), np.array([1.0 / 24.0, 1.0 / 24.0, 1.0 / 24.0, 1.0 / 24.0])) elif cellname == "triangle": points, weights = (np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]]), np.array([1.0 / 6.0, 1.0 / 6.0, 1.0 / 6.0])) elif cellname == "interval": # Trapezoidal rule points, weights = (np.array([[0.0], [1.0]]), np.array([1.0 / 2.0, 1.0 / 2.0])) elif cellname == "quadrilateral": points, weights = (np.array([[0., 0], [1., 0.], [0., 1.], [1., 1]]), np.array([1. / 4., 1. / 4., 1. / 4., 1. / 4.])) elif cellname == "hexahedron": points, weights = (np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [1., 1., 0.], [0., 0., 1.], [1., 0., 1.], [0., 1., 1.], [1., 1., 1.]]), np.array([1. / 8., 1. / 8., 1. / 8., 1. / 8., 1. / 8., 1. / 8., 1. / 8., 1. 
/ 8.])) else: raise RuntimeError(f"Vertex scheme is not supported for cell: {cellname}") else: degree = md["quadrature_degree"] points, weights = create_quadrature_points_and_weights( integral_type, cell, degree, scheme, [convert_element(e) for e in form_data.argument_elements]) points = np.asarray(points) weights = np.asarray(weights) rule = QuadratureRule(points, weights) if rule not in grouped_integrands: grouped_integrands[rule] = [] grouped_integrands[rule].append(integral.integrand()) sorted_integrals = {} for rule, integrands in grouped_integrands.items(): integrands_summed = sorted_expr_sum(integrands) integral_new = Integral(integrands_summed, itg_data.integral_type, itg_data.domain, itg_data.subdomain_id, {}, None) sorted_integrals[rule] = integral_new # TODO: See if coefficient_numbering can be removed # Build coefficient numbering for UFC interface here, to avoid # renumbering in UFL and application of replace mapping coefficient_numbering = {} for i, f in enumerate(form_data.reduced_coefficients): coefficient_numbering[f] = i # Add coefficient numbering to IR ir["coefficient_numbering"] = coefficient_numbering index_to_coeff = sorted([(v, k) for k, v in coefficient_numbering.items()]) offsets = {} width = 2 if integral_type in ("interior_facet") else 1 _offset = 0 for k, el in zip(index_to_coeff, form_data.coefficient_elements): offsets[k[1]] = _offset _offset += width * ir["element_dimensions"][convert_element(el)] # Copy offsets also into IR ir["coefficient_offsets"] = offsets # Build offsets for Constants original_constant_offsets = {} _offset = 0 for constant in form_data.original_form.constants(): original_constant_offsets[constant] = _offset _offset += np.prod(constant.ufl_shape, dtype=int) ir["original_constant_offsets"] = original_constant_offsets # Create map from number of quadrature points -> integrand integrands = {rule: integral.integrand() for rule, integral in sorted_integrals.items()} # Build more specific intermediate representation integral_ir = compute_integral_ir(itg_data.domain.ufl_cell(), itg_data.integral_type, ir["entitytype"], integrands, ir["tensor_shape"], options, visualise) ir.update(integral_ir) # Fetch name ir["name"] = integral_names[(form_index, itg_data_index)] irs.append(IntegralIR(**ir)) return irs def _compute_form_ir(form_data, form_id, prefix, form_names, integral_names, element_numbers, finite_element_names, dofmap_names, object_names) -> FormIR: """Compute intermediate representation of form.""" logger.info(f"Computing IR for form {form_id}") # Store id ir = {"id": form_id} # Compute common data ir["name"] = form_names[form_id] ir["signature"] = form_data.original_form.signature() ir["rank"] = len(form_data.original_form.arguments()) ir["num_coefficients"] = len(form_data.reduced_coefficients) ir["num_constants"] = len(form_data.original_form.constants()) ir["coefficient_names"] = [object_names.get(id(obj), f"w{j}") for j, obj in enumerate(form_data.reduced_coefficients)] ir["constant_names"] = [object_names.get(id(obj), f"c{j}") for j, obj in enumerate(form_data.original_form.constants())] ir["original_coefficient_position"] = form_data.original_coefficient_positions ir["finite_elements"] = [ finite_element_names[convert_element(e)] for e in form_data.argument_elements + form_data.coefficient_elements ] ir["dofmaps"] = [ dofmap_names[convert_element(e)] for e in form_data.argument_elements + form_data.coefficient_elements ] fs = {} for function in form_data.original_form.arguments() + tuple(form_data.reduced_coefficients): name = 
object_names.get(id(function), str(function)) if not str(name).isidentifier(): raise ValueError(f"Function name \"{name}\" must be a valid object identifier.") el = convert_element(convert_element(function.ufl_function_space().ufl_element())) cmap = function.ufl_function_space().ufl_domain().ufl_coordinate_element() # Default point spacing for CoordinateElement is equispaced if not isinstance(cmap, basix.ufl._ElementBase) and cmap.variant() is None: cmap._sub_element._variant = "equispaced" cmap = convert_element(cmap) family = cmap.family() degree = cmap.degree() fs[name] = (finite_element_names[el], dofmap_names[el], family, degree, cmap.cell_type, cmap.lagrange_variant) form_name = object_names.get(id(form_data.original_form), form_id) ir["function_spaces"] = fs ir["name_from_uflfile"] = f"form_{prefix}_{form_name}" # Store names of integrals and subdomain_ids for this form, grouped # by integral types since form points to all integrals it contains, # it has to know their names for codegen phase ir["integral_names"] = {} ir["subdomain_ids"] = {} ufcx_integral_types = ("cell", "exterior_facet", "interior_facet") ir["subdomain_ids"] = {itg_type: [] for itg_type in ufcx_integral_types} ir["integral_names"] = {itg_type: [] for itg_type in ufcx_integral_types} for itg_index, itg_data in enumerate(form_data.integral_data): # UFL is using "otherwise" for default integrals (over whole mesh) # but FFCx needs integers, so otherwise = -1 integral_type = itg_data.integral_type subdomain_ids = [sid if sid != "otherwise" else -1 for sid in itg_data.subdomain_id] if min(subdomain_ids) < -1: raise ValueError("Integral subdomain IDs must be non-negative.") ir["subdomain_ids"][integral_type] += subdomain_ids for _ in range(len(subdomain_ids)): ir["integral_names"][integral_type] += [integral_names[(form_id, itg_index)]] return FormIR(**ir) def _compute_expression_ir(expression, index, prefix, analysis, options, visualise, object_names, finite_element_names, dofmap_names): """Compute intermediate representation of expression.""" logger.info(f"Computing IR for expression {index}") # Compute representation ir = {} original_expression = (expression[2], expression[1]) ir["name"] = naming.expression_name(original_expression, prefix) original_expression = expression[2] points = expression[1] expression = expression[0] try: cell = ufl.domain.extract_unique_domain(expression).ufl_cell() except AttributeError: # This case corresponds to a spatially constant expression # without any dependencies cell = None # Prepare dimensions of all unique element in expression, including # elements for arguments, coefficients and coordinate mappings ir["element_dimensions"] = {element: element.dim + element.num_global_support_dofs for element in analysis.unique_elements} # Extract dimensions for elements of arguments only arguments = ufl.algorithms.extract_arguments(expression) argument_elements = tuple(convert_element(f.ufl_function_space().ufl_element()) for f in arguments) argument_dimensions = [ir["element_dimensions"][element] for element in argument_elements] tensor_shape = argument_dimensions ir["tensor_shape"] = tensor_shape ir["expression_shape"] = list(expression.ufl_shape) coefficients = ufl.algorithms.extract_coefficients(expression) coefficient_numbering = {} for i, coeff in enumerate(coefficients): coefficient_numbering[coeff] = i # Add coefficient numbering to IR ir["coefficient_numbering"] = coefficient_numbering original_coefficient_positions = [] original_coefficients = 
ufl.algorithms.extract_coefficients(original_expression) for coeff in coefficients: original_coefficient_positions.append(original_coefficients.index(coeff)) ir["coefficient_names"] = [object_names.get(id(obj), f"w{j}") for j, obj in enumerate(coefficients)] ir["constant_names"] = [object_names.get(id(obj), f"c{j}") for j, obj in enumerate(ufl.algorithms.analysis.extract_constants(expression))] fs = {} for function in tuple(original_coefficients) + tuple(arguments): name = object_names.get(id(function), str(function)) if not str(name).isidentifier(): raise ValueError(f"Function name \"{name}\" must be a valid object identifier.") el = convert_element(function.ufl_function_space().ufl_element()) cmap = convert_element(function.ufl_function_space().ufl_domain().ufl_coordinate_element()) family = cmap.family() degree = cmap.degree() fs[name] = (finite_element_names[el], dofmap_names[el], family, degree) expression_name = object_names.get(id(original_expression), index) ir["function_spaces"] = fs ir["name_from_uflfile"] = f"expression_{prefix}_{expression_name}" if len(argument_elements) > 1: raise RuntimeError("Expression with more than one Argument not implemented.") ir["original_coefficient_positions"] = original_coefficient_positions coefficient_elements = tuple(convert_element(f.ufl_element()) for f in coefficients) offsets = {} _offset = 0 for i, el in enumerate(coefficient_elements): offsets[coefficients[i]] = _offset _offset += ir["element_dimensions"][convert_element(el)] # Copy offsets also into IR ir["coefficient_offsets"] = offsets ir["integral_type"] = "expression" ir["entitytype"] = "cell" # Build offsets for Constants original_constant_offsets = {} _offset = 0 for constant in ufl.algorithms.analysis.extract_constants(expression): original_constant_offsets[constant] = _offset _offset += np.prod(constant.ufl_shape, dtype=int) ir["original_constant_offsets"] = original_constant_offsets ir["points"] = points weights = np.array([1.0] * points.shape[0]) rule = QuadratureRule(points, weights) integrands = {rule: expression} if cell is None: assert len(ir["original_coefficient_positions"]) == 0 and len(ir["original_constant_offsets"]) == 0 expression_ir = compute_integral_ir(cell, ir["integral_type"], ir["entitytype"], integrands, tensor_shape, options, visualise) ir.update(expression_ir) return ExpressionIR(**ir) ffcx-0.7.0/ffcx/ir/representationutils.py000066400000000000000000000067201450721277100205140ustar00rootroot00000000000000# Copyright (C) 2012-2017 Marie Rognes # # This file is part of FFCx. (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Utility functions for some code shared between representations.""" import hashlib import logging import numpy as np import ufl from ffcx.element_interface import (create_quadrature, map_facet_points, reference_cell_vertices) logger = logging.getLogger("ffcx") class QuadratureRule: def __init__(self, points, weights): self.points = np.ascontiguousarray(points) # TODO: change basix to make this unnecessary self.weights = weights self._hash = None def __hash__(self): if self._hash is None: self.hash_obj = hashlib.sha1(self.points) self._hash = int(self.hash_obj.hexdigest(), 32) return self._hash def __eq__(self, other): return np.allclose(self.points, other.points) and np.allclose(self.weights, other.weights) def id(self): """Return unique deterministic identifier. Note ---- This identifier is used to provide unique names to tables and symbols in generated code.
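The identifier is derived from the SHA-1 digest of the quadrature points computed in ``__hash__``, so it is deterministic for a given set of points.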
""" return self.hash_obj.hexdigest()[-3:] def create_quadrature_points_and_weights(integral_type, cell, degree, rule, elements): """Create quadrature rule and return points and weights.""" if integral_type == "cell": return create_quadrature(cell.cellname(), degree, rule, elements) elif integral_type in ufl.measure.facet_integral_types: facet_types = cell.facet_types() # Raise exception for cells with more than one facet type e.g. prisms if len(facet_types) > 1: raise Exception(f"Cell type {cell} not supported for integral type {integral_type}.") return create_quadrature(facet_types[0].cellname(), degree, rule, elements) elif integral_type in ufl.measure.point_integral_types: return create_quadrature("vertex", degree, rule, elements) elif integral_type == "expression": return (None, None) logging.exception(f"Unknown integral type: {integral_type}") return (None, None) def integral_type_to_entity_dim(integral_type, tdim): """Given integral_type and domain tdim, return the tdim of the integration entity.""" if integral_type == "cell": entity_dim = tdim elif integral_type in ufl.measure.facet_integral_types: entity_dim = tdim - 1 elif integral_type in ufl.measure.point_integral_types: entity_dim = 0 elif integral_type in ufl.custom_integral_types: entity_dim = tdim elif integral_type == "expression": entity_dim = tdim else: raise RuntimeError(f"Unknown integral_type: {integral_type}") return entity_dim def map_integral_points(points, integral_type, cell, entity): """Map points from reference entity to its parent reference cell.""" tdim = cell.topological_dimension() entity_dim = integral_type_to_entity_dim(integral_type, tdim) if entity_dim == tdim: assert points.shape[1] == tdim assert entity == 0 return np.asarray(points) elif entity_dim == tdim - 1: assert points.shape[1] == tdim - 1 return np.asarray(map_facet_points(points, entity, cell.cellname())) elif entity_dim == 0: return np.asarray([reference_cell_vertices(cell.cellname())[entity]]) else: raise RuntimeError(f"Can't map points from entity_dim={entity_dim}") ffcx-0.7.0/ffcx/main.py000066400000000000000000000052011450721277100146740ustar00rootroot00000000000000# Copyright (C) 2004-2020 Anders Logg, Garth N. Wells and Michal Habera # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Command-line interface to FFCx. Parse command-line arguments and generate code from input UFL form files. 
""" import argparse import cProfile import logging import pathlib import re import string import ufl from ffcx import __version__ as FFCX_VERSION from ffcx import compiler, formatting from ffcx.options import FFCX_DEFAULT_OPTIONS, get_options logger = logging.getLogger("ffcx") parser = argparse.ArgumentParser( description="FEniCS Form Compiler (FFCx, https://fenicsproject.org)") parser.add_argument( "--version", action='version', version=f"%(prog)s (version {FFCX_VERSION})") parser.add_argument("-o", "--output-directory", type=str, default=".", help="output directory") parser.add_argument("--visualise", action="store_true", help="visualise the IR graph") parser.add_argument("-p", "--profile", action='store_true', help="enable profiling") # Add all options from FFCx option system for opt_name, (opt_val, opt_desc) in FFCX_DEFAULT_OPTIONS.items(): parser.add_argument(f"--{opt_name}", type=type(opt_val), help=f"{opt_desc} (default={opt_val})") parser.add_argument("ufl_file", nargs='+', help="UFL file(s) to be compiled") def main(args=None): xargs = parser.parse_args(args) # Parse all other options priority_options = {k: v for k, v in xargs.__dict__.items() if v is not None} options = get_options(priority_options) # Call parser and compiler for each file for filename in xargs.ufl_file: file = pathlib.Path(filename) # Remove weird characters (file system allows more than the C # preprocessor) prefix = file.stem prefix = re.subn("[^{}]".format(string.ascii_letters + string.digits + "_"), "!", prefix)[0] prefix = re.subn("!+", "_", prefix)[0] # Turn on profiling if xargs.profile: pr = cProfile.Profile() pr.enable() # Load UFL file ufd = ufl.algorithms.load_ufl_file(filename) # Generate code code_h, code_c = compiler.compile_ufl_objects( ufd.forms + ufd.expressions + ufd.elements, ufd.object_names, prefix=prefix, options=options, visualise=xargs.visualise) # Write to file formatting.write_code(code_h, code_c, prefix, xargs.output_directory) # Turn off profiling and write status to file if xargs.profile: pr.disable() pfn = f"ffcx_{prefix}.profile" pr.dump_stats(pfn) return 0 ffcx-0.7.0/ffcx/naming.py000066400000000000000000000073121450721277100152260ustar00rootroot00000000000000# Copyright (C) 2009-2020 Anders Logg and Michal Habera # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later import hashlib import typing import numpy as np import numpy.typing as npt import ffcx import ufl from .element_interface import convert_element def compute_signature(ufl_objects: typing.List[ typing.Union[ufl.Form, ufl.FiniteElementBase, typing.Tuple[ufl.core.expr.Expr, npt.NDArray[np.float64]]]], tag: str) -> str: """Compute the signature hash. Based on the UFL type of the objects and an additional optional 'tag'. 
""" object_signature = "" for ufl_object in ufl_objects: # Get signature from ufl object if isinstance(ufl_object, ufl.Form): kind = "form" object_signature += ufl_object.signature() elif isinstance(ufl_object, ufl.FiniteElementBase): object_signature += repr(convert_element(ufl_object)) kind = "element" elif isinstance(ufl_object, tuple) and isinstance(ufl_object[0], ufl.core.expr.Expr): expr = ufl_object[0] points = ufl_object[1] # FIXME Move this to UFL, cache the computation coeffs = ufl.algorithms.extract_coefficients(expr) consts = ufl.algorithms.analysis.extract_constants(expr) args = ufl.algorithms.analysis.extract_arguments(expr) rn = dict() rn.update(dict((c, i) for i, c in enumerate(coeffs))) rn.update(dict((c, i) for i, c in enumerate(consts))) rn.update(dict((c, i) for i, c in enumerate(args))) domains: typing.List[ufl.Mesh] = [] for coeff in coeffs: domains.append(*coeff.ufl_domains()) for arg in args: domains.append(*arg.ufl_function_space().ufl_domains()) for gc in ufl.algorithms.analysis.extract_type(expr, ufl.classes.GeometricQuantity): domains.append(*gc.ufl_domains()) for const in consts: domains.append(const.ufl_domain()) domains = ufl.algorithms.analysis.unique_tuple(domains) rn.update(dict((d, i) for i, d in enumerate(domains))) # Hash on UFL signature and points signature = ufl.algorithms.signature.compute_expression_signature(expr, rn) object_signature += signature object_signature += repr(points) kind = "expression" else: raise RuntimeError(f"Unknown ufl object type {ufl_object.__class__.__name__}") # Build combined signature signatures = [object_signature, str(ffcx.__version__), ffcx.codegeneration.get_signature(), kind, tag] string = ";".join(signatures) return hashlib.sha1(string.encode('utf-8')).hexdigest() def integral_name(original_form, integral_type, form_id, subdomain_id, prefix): sig = compute_signature([original_form], str((prefix, integral_type, form_id, subdomain_id))) return f"integral_{sig}" def form_name(original_form, form_id, prefix): sig = compute_signature([original_form], str((prefix, form_id))) return f"form_{sig}" def finite_element_name(ufl_element, prefix): assert isinstance(ufl_element, ufl.FiniteElementBase) sig = compute_signature([convert_element(ufl_element)], prefix) return f"element_{sig}" def dofmap_name(ufl_element, prefix): assert isinstance(ufl_element, ufl.FiniteElementBase) sig = compute_signature([convert_element(ufl_element)], prefix) return f"dofmap_{sig}" def expression_name(expression, prefix): assert isinstance(expression[0], ufl.core.expr.Expr) sig = compute_signature([expression], prefix) return f"expression_{sig}" ffcx-0.7.0/ffcx/options.py000066400000000000000000000061401450721277100154460ustar00rootroot00000000000000# Copyright (C) 2005-2020 Anders Logg, Michal Habera, Jack S. Hale # # This file is part of FFCx. (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later import functools import json import logging import os import os.path import pprint from pathlib import Path from typing import Any, Dict, Optional logger = logging.getLogger("ffcx") FFCX_DEFAULT_OPTIONS = { "epsilon": (1e-14, "Machine precision, used for dropping zero terms in tables"), "scalar_type": ("double", """Scalar type used in generated code. Any of real or complex C floating-point types, e.g. 
float, double, float _Complex, double _Complex, ..."""), "table_rtol": (1e-6, "Relative precision to use when comparing finite element table values for table reuse."), "table_atol": (1e-9, "Absolute precision to use when comparing finite element table values for reuse."), "verbosity": (30, "Logger verbosity. Follows standard logging library levels, i.e. INFO=20, DEBUG=10, etc.") } @functools.lru_cache(maxsize=None) def _load_options(): """Load options from JSON files.""" user_config_file = os.getenv("XDG_CONFIG_HOME", default=Path.home().joinpath(".config")) \ / Path("ffcx", "ffcx_options.json") try: with open(user_config_file) as f: user_options = json.load(f) except FileNotFoundError: user_options = {} pwd_config_file = Path.cwd().joinpath("ffcx_options.json") try: with open(pwd_config_file) as f: pwd_options = json.load(f) except FileNotFoundError: pwd_options = {} return (user_options, pwd_options) def get_options(priority_options: Optional[dict] = None) -> dict: """Return (a copy of) the merged option values for FFCX. Options ---------- priority_options: take priority over all other option values (see notes) Returns ------- dict: merged option values Notes ----- This function sets the log level from the merged option values prior to returning. The `ffcx_options.json` files are cached on the first call. Subsequent calls to this function use this cache. Priority ordering of options from highest to lowest is: - **priority_options** (API and command line options) - **$PWD/ffcx_options.json** (local options) - **$XDG_CONFIG_HOME/ffcx/ffcx_options.json** (user options) - **FFCX_DEFAULT_OPTIONS** in `ffcx.options` `XDG_CONFIG_HOME` is `~/.config/` if the environment variable is not set. Example `ffcx_options.json` file: { "epsilon": 1e-7 } """ options: Dict[str, Any] = {} for opt, (value, _) in FFCX_DEFAULT_OPTIONS.items(): options[opt] = value # NOTE: _load_options uses functools.lru_cache user_options, pwd_options = _load_options() options.update(user_options) options.update(pwd_options) if priority_options is not None: options.update(priority_options) logger.setLevel(options["verbosity"]) logger.info("Final option values") logger.info(pprint.pformat(options)) return options ffcx-0.7.0/mypy.ini000066400000000000000000000010331450721277100141460ustar00rootroot00000000000000[mypy] # Suggested at https://blog.wolt.com/engineering/2021/09/30/professional-grade-mypy-configuration/ # Goal would be to make all of the below True long-term disallow_untyped_defs = False disallow_any_unimported = False no_implicit_optional = False check_untyped_defs = False warn_return_any = False warn_unused_ignores = False show_error_codes = True [mypy-pygraphviz.*] ignore_missing_imports = True [mypy-ufl.*] ignore_missing_imports = True [mypy-basix.*] ignore_missing_imports = True [mypy-cffi.*] ignore_missing_imports = True ffcx-0.7.0/pyproject.toml000066400000000000000000000013151450721277100153660ustar00rootroot00000000000000[build-system] requires = ["setuptools>=62", "wheel"] build-backend = "setuptools.build_meta" [tool.pytest.ini_options] minversion = "6.0" addopts = "-ra" testpaths = [ "test" ] norecursedirs = [ "libs", "docs" ] log_cli = true [tool.pydocstyle] convention = "numpy" # TODO: Work on removing these ignores add-ignore = [ "D100", # Missing docstrings in modules "D101", # Missing docstrings in classes "D102", # Missing docstrings in methods "D103", # Missing docstrings in functions "D104", # Missing docstrings in packages "D105", # Missing docstrings in magic methods "D401", # Google convention 
"D406", # Google convention "D407" # Google convention ] ffcx-0.7.0/setup.cfg000066400000000000000000000043101450721277100142710ustar00rootroot00000000000000# Setuptools does not yet support modern pyproject.toml but will do so in the # future [metadata] name = fenics-ffcx version = 0.7.0 author = FEniCS Project Contributors email = fenics-dev@googlegroups.com maintainer = FEniCS Project Steering Council description = The FEniCSx Form Compiler url = https://github.com/FEniCS/ffcx project_urls = Homepage = https://fenicsproject.org Documentation = https://docs.fenicsproject.org Issues = https://github.com/FEniCS/ffcx/issues Funding = https://numfocus.org/donate long_description = file: README.md long_description_content_type = text/markdown license=LGPL-3.0-or-later classifiers = Development Status :: 5 - Production/Stable Intended Audience :: Developers Intended Audience :: Science/Research License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+) Operating System :: POSIX Operating System :: POSIX :: Linux Operating System :: MacOS :: MacOS X Programming Language :: Python Programming Language :: Python :: 3 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 Topic :: Scientific/Engineering :: Mathematics Topic :: Software Development :: Libraries :: Python Modules Topic :: Software Development :: Code Generators [options] packages = find: include_package_data = True zip_safe = False python_requires = >= 3.8 setup_requires = setuptools >= 62 wheel install_requires = numpy cffi setuptools fenics-basix >= 0.7.0, <0.8.0 fenics-ufl >= 2023.2.0, <2023.3.0 [options.extras_require] docs = sphinx; sphinx_rtd_theme lint = flake8; pydocstyle[toml] optional = pygraphviz == 1.7 test = pytest >= 6.0; sympy ci = coverage coveralls isort pytest-cov pytest-xdist mypy types-setuptools fenics-ffcx[docs] fenics-ffcx[lint] fenics-ffcx[optional] fenics-ffcx[test] [options.entry_points] console_scripts = ffcx = ffcx.__main__:main [flake8] max-line-length = 120 exclude = .git,__pycache__,docs/source/conf.py,build,dist,libs ignore = # Line length W503, # Variable names l, O, I, ... E741, ffcx-0.7.0/test/000077500000000000000000000000001450721277100134315ustar00rootroot00000000000000ffcx-0.7.0/test/Poisson.py000066400000000000000000000024251450721277100154400ustar00rootroot00000000000000# Copyright (C) 2004-2007 Anders Logg # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see . # # The bilinear form a(u, v) and linear form L(v) for # Poisson's equation. 
# # Compile this form with FFCx: ffcx Poisson.py from ufl import (Coefficient, Constant, Mesh, TestFunction, TrialFunction, dx, grad, inner) import basix.ufl mesh = Mesh(basix.ufl.element('P', "triangle", 2, shape=(2, ))) e = basix.ufl.element("Lagrange", "triangle", 2) u = TrialFunction(e) v = TestFunction(e) f = Coefficient(e) kappa1 = Constant(mesh.ufl_cell(), shape=(2, 2)) kappa2 = Constant(mesh.ufl_cell(), shape=(2, 2)) a = inner(kappa1, kappa2) * inner(grad(u), grad(v)) * dx L = f * v * dx ffcx-0.7.0/test/conftest.py000066400000000000000000000003771450721277100156370ustar00rootroot00000000000000# Copyright (C) 2020 Michal Habera # # This file is part of FFCx. (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later import pytest @pytest.fixture(scope="module") def compile_args(): return ["-O1", "-Wall", "-Werror"] ffcx-0.7.0/test/test_add_mode.py000066400000000000000000000113661450721277100166050ustar00rootroot00000000000000# Copyright (C) 2019 Chris Richardson # # This file is part of FFCx. (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later import numpy as np import pytest import ffcx.codegeneration.jit import basix.ufl import ufl from ffcx.codegeneration.utils import cdtype_to_numpy, scalar_to_value_type @pytest.mark.parametrize("mode", [ "double", "float", "long double", "double _Complex", "float _Complex" ]) def test_additive_facet_integral(mode, compile_args): element = basix.ufl.element("Lagrange", "triangle", 1) domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2, ))) space = ufl.FunctionSpace(domain, element) u, v = ufl.TrialFunction(space), ufl.TestFunction(space) a = ufl.inner(u, v) * ufl.ds forms = [a] compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms( forms, options={'scalar_type': mode}, cffi_extra_compile_args=compile_args) for f, compiled_f in zip(forms, compiled_forms): assert compiled_f.rank == len(f.arguments()) ffi = module.ffi form0 = compiled_forms[0] integral_offsets = form0.form_integral_offsets ex = module.lib.exterior_facet assert integral_offsets[ex + 1] - integral_offsets[ex] == 1 integral_id = form0.form_integral_ids[integral_offsets[ex]] assert integral_id == -1 default_integral = form0.form_integrals[integral_offsets[ex]] np_type = cdtype_to_numpy(mode) A = np.zeros((3, 3), dtype=np_type) w = np.array([], dtype=np_type) c = np.array([], dtype=np_type) facets = np.array([0], dtype=np.int32) perm = np.array([0], dtype=np.uint8) geom_type = scalar_to_value_type(mode) np_gtype = cdtype_to_numpy(geom_type) coords = np.array([0.0, 2.0, 0.0, np.sqrt(3.0), -1.0, 0.0, -np.sqrt(3.0), -1.0, 0.0], dtype=np_gtype) kernel = getattr(default_integral, f"tabulate_tensor_{np_type}") for i in range(3): facets[0] = i kernel(ffi.cast('{type} *'.format(type=mode), A.ctypes.data), ffi.cast('{type} *'.format(type=mode), w.ctypes.data), ffi.cast('{type} *'.format(type=mode), c.ctypes.data), ffi.cast(f'{geom_type} *', coords.ctypes.data), ffi.cast('int *', facets.ctypes.data), ffi.cast('uint8_t *', perm.ctypes.data)) assert np.isclose(A.sum(), np.sqrt(12) * (i + 1)) @pytest.mark.parametrize("mode", ["double", "float", "long double", "double _Complex", "float _Complex"]) def test_additive_cell_integral(mode, compile_args): element = basix.ufl.element("Lagrange", "triangle", 1) domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2, ))) space = ufl.FunctionSpace(domain, element) u, v = ufl.TrialFunction(space), ufl.TestFunction(space) a = ufl.inner(ufl.grad(u), ufl.grad(v)) *
ufl.dx forms = [a] compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms( forms, options={'scalar_type': mode}, cffi_extra_compile_args=compile_args) for f, compiled_f in zip(forms, compiled_forms): assert compiled_f.rank == len(f.arguments()) ffi = module.ffi form0 = compiled_forms[0] cell = module.lib.cell offsets = form0.form_integral_offsets num_integrals = offsets[cell + 1] - offsets[cell] assert num_integrals == 1 integral_id = form0.form_integral_ids[offsets[cell]] assert integral_id == -1 default_integral = form0.form_integrals[offsets[cell]] np_type = cdtype_to_numpy(mode) A = np.zeros((3, 3), dtype=np_type) w = np.array([], dtype=np_type) c = np.array([], dtype=np_type) geom_type = scalar_to_value_type(mode) np_gtype = cdtype_to_numpy(geom_type) coords = np.array([0.0, 2.0, 0.0, np.sqrt(3.0), -1.0, 0.0, -np.sqrt(3.0), -1.0, 0.0], dtype=np_gtype) kernel = getattr(default_integral, f"tabulate_tensor_{np_type}") kernel(ffi.cast('{type} *'.format(type=mode), A.ctypes.data), ffi.cast('{type} *'.format(type=mode), w.ctypes.data), ffi.cast('{type} *'.format(type=mode), c.ctypes.data), ffi.cast(f'{geom_type} *', coords.ctypes.data), ffi.NULL, ffi.NULL) A0 = np.array(A) for i in range(3): kernel(ffi.cast('{type} *'.format(type=mode), A.ctypes.data), ffi.cast('{type} *'.format(type=mode), w.ctypes.data), ffi.cast('{type} *'.format(type=mode), c.ctypes.data), ffi.cast(f'{geom_type} *', coords.ctypes.data), ffi.NULL, ffi.NULL) assert np.all(np.isclose(A, (i + 2) * A0)) ffcx-0.7.0/test/test_blocked_elements.py000066400000000000000000000124261450721277100203460ustar00rootroot00000000000000# Copyright (C) 2020 Matthew Scroggs # # This file is part of FFCx. (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later import numpy as np import ffcx import ffcx.codegeneration.jit import basix.ufl import ufl def test_finite_element(compile_args): ufl_element = basix.ufl.element("Lagrange", "triangle", 1) jit_compiled_elements, module, code = ffcx.codegeneration.jit.compile_elements( [ufl_element], cffi_extra_compile_args=compile_args) ufcx_element, ufcx_dofmap = jit_compiled_elements[0] assert ufcx_element.topological_dimension == 2 assert ufcx_element.geometric_dimension == 2 assert ufcx_element.space_dimension == 3 assert ufcx_element.value_rank == 0 assert ufcx_element.value_size == 1 assert ufcx_element.reference_value_rank == 0 assert ufcx_element.reference_value_size == 1 assert ufcx_element.block_size == 1 assert ufcx_element.num_sub_elements == 0 assert ufcx_dofmap.block_size == 1 assert ufcx_dofmap.num_global_support_dofs == 0 assert ufcx_dofmap.num_global_support_dofs == 0 assert ufcx_dofmap.num_element_support_dofs == 3 off = np.array([ufcx_dofmap.entity_dof_offsets[i] for i in range(8)]) assert np.all(np.diff(off) == [1, 1, 1, 0, 0, 0, 0]) for v in range(3): assert ufcx_dofmap.entity_dofs[v] == v assert ufcx_dofmap.num_sub_dofmaps == 0 def test_vector_element(compile_args): ufl_element = basix.ufl.element("Lagrange", "triangle", 1, shape=(2, )) jit_compiled_elements, module, code = ffcx.codegeneration.jit.compile_elements( [ufl_element], cffi_extra_compile_args=compile_args) ufcx_element, ufcx_dofmap = jit_compiled_elements[0] assert ufcx_element.topological_dimension == 2 assert ufcx_element.geometric_dimension == 2 assert ufcx_element.space_dimension == 6 assert ufcx_element.value_rank == 1 assert ufcx_element.value_shape[0] == 2 assert ufcx_element.value_size == 2 assert ufcx_element.reference_value_rank == 1 assert 
ufcx_element.reference_value_shape[0] == 2 assert ufcx_element.reference_value_size == 2 assert ufcx_element.block_size == 2 assert ufcx_element.num_sub_elements == 2 assert ufcx_dofmap.block_size == 2 assert ufcx_dofmap.num_global_support_dofs == 0 assert ufcx_dofmap.num_global_support_dofs == 0 assert ufcx_dofmap.num_element_support_dofs == 3 off = np.array([ufcx_dofmap.entity_dof_offsets[i] for i in range(8)]) assert np.all(np.diff(off) == [1, 1, 1, 0, 0, 0, 0]) for v in range(3): assert ufcx_dofmap.entity_dofs[v] == v assert ufcx_dofmap.num_sub_dofmaps == 2 def test_tensor_element(compile_args): ufl_element = basix.ufl.element("Lagrange", "triangle", 1, shape=(2, 2)) jit_compiled_elements, module, code = ffcx.codegeneration.jit.compile_elements( [ufl_element], cffi_extra_compile_args=compile_args) ufcx_element, ufcx_dofmap = jit_compiled_elements[0] assert ufcx_element.topological_dimension == 2 assert ufcx_element.geometric_dimension == 2 assert ufcx_element.space_dimension == 12 assert ufcx_element.value_rank == 2 assert ufcx_element.value_shape[0] == 2 assert ufcx_element.value_shape[1] == 2 assert ufcx_element.value_size == 4 assert ufcx_element.reference_value_rank == 2 assert ufcx_element.reference_value_shape[0] == 2 assert ufcx_element.reference_value_shape[1] == 2 assert ufcx_element.reference_value_size == 4 assert ufcx_element.block_size == 4 assert ufcx_element.num_sub_elements == 4 assert ufcx_dofmap.block_size == 4 assert ufcx_dofmap.num_global_support_dofs == 0 assert ufcx_dofmap.num_global_support_dofs == 0 assert ufcx_dofmap.num_element_support_dofs == 3 off = np.array([ufcx_dofmap.entity_dof_offsets[i] for i in range(8)]) assert np.all(np.diff(off) == [1, 1, 1, 0, 0, 0, 0]) for v in range(3): assert ufcx_dofmap.entity_dofs[v] == v assert ufcx_dofmap.num_sub_dofmaps == 4 def test_vector_quadrature_element(compile_args): ufl_element = ufl.VectorElement(ufl.FiniteElement("Quadrature", "tetrahedron", degree=2, quad_scheme="default")) jit_compiled_elements, module, code = ffcx.codegeneration.jit.compile_elements( [ufl_element], cffi_extra_compile_args=compile_args) ufcx_element, ufcx_dofmap = jit_compiled_elements[0] assert ufcx_element.topological_dimension == 3 assert ufcx_element.geometric_dimension == 3 assert ufcx_element.space_dimension == 12 assert ufcx_element.value_rank == 1 assert ufcx_element.value_shape[0] == 3 assert ufcx_element.value_size == 3 assert ufcx_element.reference_value_rank == 1 assert ufcx_element.reference_value_shape[0] == 3 assert ufcx_element.reference_value_size == 3 assert ufcx_element.block_size == 3 assert ufcx_element.num_sub_elements == 3 assert ufcx_dofmap.block_size == 3 assert ufcx_dofmap.num_global_support_dofs == 0 assert ufcx_dofmap.num_global_support_dofs == 0 assert ufcx_dofmap.num_element_support_dofs == 4 off = np.array([ufcx_dofmap.entity_dof_offsets[i] for i in range(16)]) assert np.all(np.diff(off) == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4]) for i in range(4): assert ufcx_dofmap.entity_dofs[i] == i assert ufcx_dofmap.num_sub_dofmaps == 3 ffcx-0.7.0/test/test_cache.py000066400000000000000000000022251450721277100161060ustar00rootroot00000000000000# Copyright (C) 2019 Chris Richardson # # This file is part of FFCx. 
(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later import sys import ffcx.codegeneration.jit import ufl import basix.ufl def test_cache_modes(compile_args): element = basix.ufl.element("Lagrange", "triangle", 1) domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2, ))) space = ufl.FunctionSpace(domain, element) u, v = ufl.TrialFunction(space), ufl.TestFunction(space) a = ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx forms = [a] # Load form from /tmp compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(forms, cffi_extra_compile_args=compile_args) tmpname = module.__name__ tmpfile = module.__file__ print(tmpname, tmpfile) del sys.modules[tmpname] # Load form from cache compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms( forms, cache_dir="./compile-cache", cffi_extra_compile_args=compile_args) newname = module.__name__ newfile = module.__file__ print(newname, newfile) assert newname == tmpname assert newfile != tmpfile ffcx-0.7.0/test/test_cmdline.py000066400000000000000000000007661450721277100164660ustar00rootroot00000000000000# Copyright (C) 2018 Chris N. Richardson # # This file is part of FFCx. (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later import os import os.path import subprocess def test_cmdline_simple(): os.chdir(os.path.dirname(__file__)) subprocess.run(["ffcx", "Poisson.py"]) def test_visualise(): os.chdir(os.path.dirname(__file__)) subprocess.run(["ffcx", "--visualise", "Poisson.py"]) assert os.path.isfile("S.pdf") assert os.path.isfile("F.pdf") ffcx-0.7.0/test/test_elements.py000066400000000000000000000245231450721277100166640ustar00rootroot00000000000000# Copyright (C) 2007-2017 Anders Logg and Garth N. Wells # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see . # # Modified by Marie E. Rognes, 2010 # Modified by Lizao Li, 2016 "Unit tests for FFCx" import numpy as np import pytest import basix.ufl def element_coords(cell): if cell == "interval": return [(0,), (1,)] elif cell == "triangle": return [(0, 0), (1, 0), (0, 1)] elif cell == "tetrahedron": return [(0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)] elif cell == "quadrilateral": return [(0, 0), (1, 0), (0, 1), (1, 1)] elif cell == "hexahedron": return [(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1)] else: raise RuntimeError("Unknown cell type") def random_point(shape): w = np.random.random(len(shape)) return sum([np.array(shape[i]) * w[i] for i in range(len(shape))]) / sum(w) @pytest.mark.parametrize("degree, expected_dim", [(1, 3), (2, 6), (3, 10)]) def test_continuous_lagrange(degree, expected_dim): "Test space dimensions of continuous Lagrange elements."
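# NOTE (illustrative addition, not part of the original FFCx source): on a
# triangle, continuous Lagrange of degree k has dimension
# (k + 1) * (k + 2) // 2, which reproduces the parametrized values above:
# degree 1 -> 3, degree 2 -> 6, degree 3 -> 10.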
    P = basix.ufl.element("Lagrange", "triangle", degree)
    assert P.dim == expected_dim


@pytest.mark.parametrize("degree, expected_dim", [(1, 4), (2, 9), (3, 16)])
def xtest_continuous_lagrange_quadrilateral(degree, expected_dim):
    "Test space dimensions of continuous TensorProduct elements (quadrilateral)."
    P = basix.ufl.element("Lagrange", "quadrilateral", degree)
    assert P.dim == expected_dim


@pytest.mark.parametrize("degree, expected_dim", [(1, 4), (2, 9), (3, 16)])
def xtest_continuous_lagrange_quadrilateral_spectral(degree, expected_dim):
    "Test space dimensions of continuous TensorProduct elements (quadrilateral, spectral variant)."
    P = basix.ufl.element("Lagrange", "quadrilateral", degree, variant="spectral")
    assert P.dim == expected_dim


@pytest.mark.parametrize("degree, expected_dim", [(0, 1), (1, 3), (2, 6), (3, 10)])
def test_discontinuous_lagrange(degree, expected_dim):
    "Test space dimensions of discontinuous Lagrange elements."
    P = basix.ufl.element("DG", "triangle", degree)
    assert P.dim == expected_dim


@pytest.mark.parametrize("degree, expected_dim", [(0, 3), (1, 9), (2, 18), (3, 30)])
def test_regge(degree, expected_dim):
    "Test space dimensions of generalized Regge elements."
    P = basix.ufl.element("Regge", "triangle", degree)
    assert P.dim == expected_dim


@pytest.mark.parametrize("degree, expected_dim", [(0, 3), (1, 9), (2, 18), (3, 30)])
def xtest_hhj(degree, expected_dim):
    "Test space dimensions of Hellan-Herrmann-Johnson elements."
    P = basix.ufl.element("HHJ", "triangle", degree)
    assert P.dim == expected_dim


class TestFunctionValues():
    """Check that tabulate gives the correct values for the supported
    (non-mixed) elements of low degree."""

    # FIXME: Add tests for NED and BDM/RT in 3D.

    # Shape (basis) functions on the reference element
    reference_interval_1 = [lambda x: 1 - x[0],
                            lambda x: x[0]]
    reference_triangle_1 = [lambda x: 1 - x[0] - x[1],
                            lambda x: x[0],
                            lambda x: x[1]]
    reference_tetrahedron_1 = [lambda x: 1 - x[0] - x[1] - x[2],
                               lambda x: x[0],
                               lambda x: x[1],
                               lambda x: x[2]]
    reference_triangle_bdm1 = [lambda x: (2 * x[0], -x[1]),
                               lambda x: (-x[0], 2 * x[1]),
                               lambda x: (2 - 2 * x[0] - 3 * x[1], x[1]),
                               lambda x: (-1 + x[0] + 3 * x[1], -2 * x[1]),
                               lambda x: (-x[0], -2 + 3 * x[0] + 2 * x[1]),
                               lambda x: (2 * x[0], 1 - 3 * x[0] - x[1])]
    reference_triangle_rt1 = [lambda x: (-x[0], -x[1]),
                              lambda x: (x[0] - 1, x[1]),
                              lambda x: (-x[0], 1 - x[1])]
    reference_triangle_rt2 = [
        lambda x: (x[0] - 3 * x[0]**2, x[1] - 3 * x[0] * x[1]),
        lambda x: (x[0] - 3 * x[0] * x[1], x[1] - 3 * x[1]**2),
        lambda x: (-2 + 5 * x[0] + 3 * x[1] - 3 * x[0] * x[1] - 3 * x[0]**2,
                   2 * x[1] - 3 * x[0] * x[1] - 3 * x[1]**2),
        lambda x: (1.0 - x[0] - 3 * x[1] + 3 * x[0] * x[1], x[1] + 3 * x[1]**2),
        lambda x: (-2 * x[0] + 3 * x[0] * x[1] + 3 * x[0]**2,
                   2 - 3 * x[0] - 5 * x[1] + 3 * x[0] * x[1] + 3 * x[1]**2),
        lambda x: (x[0] - 3 * x[0]**2, -1 + 3 * x[0] + x[1] - 3 * x[0] * x[1]),
        lambda x: (-6 * x[0] + 3 * x[0] * x[1] + 6 * x[0]**2,
                   -3 * x[1] + 6 * x[0] * x[1] + 3 * x[1]**2),
        lambda x: (-3 * x[0] + 6 * x[0] * x[1] + 3 * x[0]**2,
                   -6 * x[1] + 3 * x[0] * x[1] + 6 * x[1]**2)]
    reference_triangle_ned1 = [lambda x: (-x[1], x[0]),
                               lambda x: (x[1], 1 - x[0]),
                               lambda x: (1.0 - x[1], x[0])]
    reference_tetrahedron_rt1 = [
        lambda x: (2 ** 0.5 * x[0], 2 ** 0.5 * x[1], 2 ** 0.5 * x[2]),
        lambda x: (2 ** 0.5 - 2 ** 0.5 * x[0], -2 ** 0.5 * x[1], -2 ** 0.5 * x[2]),
        lambda x: (2 ** 0.5 * x[0], 2 ** 0.5 * x[1] - 2 ** 0.5, 2 ** 0.5 * x[2]),
        lambda x: (-2 ** 0.5 * x[0], -2 ** 0.5 * x[1], 2 ** 0.5 - 2 ** 0.5 * x[2])]
    reference_tetrahedron_bdm1 = [
        lambda x: (-3 * x[0], x[1], x[2]),
        lambda x: (x[0], -3 * x[1], x[2]),
        lambda x: (x[0], x[1], -3 * x[2]),
        lambda x: (-3.0 + 3 * x[0] + 4 * x[1] + 4 * x[2], -x[1], -x[2]),
        lambda x: (1.0 - x[0] - 4 * x[1], 3 * x[1], -x[2]),
        lambda x: (1.0 - x[0] - 4 * x[2], -x[1], 3 * x[2]),
        lambda x: (x[0], 3.0 - 4 * x[0] - 3 * x[1] - 4 * x[2], x[2]),
        lambda x: (-3 * x[0], -1.0 + 4 * x[0] + x[1], x[2]),
        lambda x: (x[0], -1.0 + x[1] + 4 * x[2], -3 * x[2]),
        lambda x: (-x[0], -x[1], -3.0 + 4 * x[0] + 4 * x[1] + 3 * x[2]),
        lambda x: (3 * x[0], -x[1], 1.0 - 4 * x[0] - x[2]),
        lambda x: (-x[0], 3 * x[1], 1.0 - 4 * x[1] - x[2])]
    reference_tetrahedron_ned1 = [
        lambda x: (0.0, -x[2], x[1]),
        lambda x: (-x[2], 0.0, x[0]),
        lambda x: (-x[1], x[0], 0.0),
        lambda x: (x[2], x[2], 1.0 - x[0] - x[1]),
        lambda x: (x[1], 1.0 - x[0] - x[2], x[1]),
        lambda x: (1.0 - x[1] - x[2], x[0], x[0])]
    reference_quadrilateral_1 = [lambda x: (1 - x[0]) * (1 - x[1]),
                                 lambda x: (1 - x[0]) * x[1],
                                 lambda x: x[0] * (1 - x[1]),
                                 lambda x: x[0] * x[1]]
    reference_hexahedron_1 = [
        lambda x: (1 - x[0]) * (1 - x[1]) * (1 - x[2]),
        lambda x: (1 - x[0]) * (1 - x[1]) * x[2],
        lambda x: (1 - x[0]) * x[1] * (1 - x[2]),
        lambda x: (1 - x[0]) * x[1] * x[2],
        lambda x: x[0] * (1 - x[1]) * (1 - x[2]),
        lambda x: x[0] * (1 - x[1]) * x[2],
        lambda x: x[0] * x[1] * (1 - x[2]),
        lambda x: x[0] * x[1] * x[2]]

    # Tests to perform
    tests = [("Lagrange", "interval", 1, reference_interval_1),
             ("Lagrange", "triangle", 1, reference_triangle_1),
             ("Lagrange", "tetrahedron", 1, reference_tetrahedron_1),
             # ("Lagrange", "quadrilateral", 1, reference_quadrilateral_1),
             # ("Lagrange", "hexahedron", 1, reference_hexahedron_1),
             ("Discontinuous Lagrange", "interval", 1, reference_interval_1),
             ("Discontinuous Lagrange", "triangle", 1, reference_triangle_1),
             ("Discontinuous Lagrange", "tetrahedron", 1, reference_tetrahedron_1),
             # ("Brezzi-Douglas-Marini", "triangle", 1, reference_triangle_bdm1),
             ("Raviart-Thomas", "triangle", 1, reference_triangle_rt1),
             # ("Raviart-Thomas", "triangle", 2, reference_triangle_rt2),
             # ("Discontinuous Raviart-Thomas", "triangle", 1, reference_triangle_rt1),
             # ("Discontinuous Raviart-Thomas", "triangle", 2, reference_triangle_rt2),
             ("N1curl", "triangle", 1, reference_triangle_ned1),
             ("Raviart-Thomas", "tetrahedron", 1, reference_tetrahedron_rt1),
             # ("Discontinuous Raviart-Thomas", "tetrahedron", 1, reference_tetrahedron_rt1),
             # ("Brezzi-Douglas-Marini", "tetrahedron", 1, reference_tetrahedron_bdm1),
             ("N1curl", "tetrahedron", 1, reference_tetrahedron_ned1)]

    @pytest.mark.parametrize("family, cell, degree, reference", tests)
    def test_values(self, family, cell, degree, reference):
        # Create element
        e = basix.ufl.element(family, cell, degree)

        # Get some points and check basis function values at them
        points = [random_point(element_coords(cell)) for i in range(5)]
        for x in points:
            table = e.tabulate(0, np.array([x], dtype=np.float64))
            basis = table[0]
            if sum(e.value_shape()) == 1:
                for i, value in enumerate(basis[0]):
                    assert np.isclose(value, reference[i](x))
            else:
                for i, ref in enumerate(reference):
                    assert np.allclose(basis[0][i::len(reference)], ref(x))
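
# A minimal standalone illustration (not one of the suite's tests) of the
# dimension checks above: the dimension of a Lagrange space is the number of
# nodal points on the reference cell. The function name is hypothetical and
# chosen so pytest does not collect it.
def example_lagrange_triangle_dimensions():
    P1 = basix.ufl.element("Lagrange", "triangle", 1)
    assert P1.dim == 3  # one dof per vertex
    P2 = basix.ufl.element("Lagrange", "triangle", 2)
    assert P2.dim == 6  # three vertices plus three edge midpoints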

ffcx-0.7.0/test/test_jit_expression.py
# -*- coding: utf-8 -*-
# Copyright (C) 2019-2022 Michal Habera and Jørgen S. Dokken
#
# This file is part of FFCx. (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later

import cffi
import numpy as np

import basix
import basix.ufl
import ffcx.codegeneration.jit
import ufl
from ffcx.codegeneration.utils import cdtype_to_numpy, scalar_to_value_type


def float_to_type(name):
    """Map a string name to C and NumPy types."""
    if name == "double":
        return "double", np.float64
    elif name == "double complex":
        return "double _Complex", np.complex128
    elif name == "float":
        return "float", np.float32
    elif name == "float complex":
        return "float _Complex", np.complex64
    elif name == "long double":
        return "long double", np.longdouble
    else:
        raise RuntimeError("Unknown C type for: {}".format(name))


def test_matvec(compile_args):
    """Test evaluation of a linear rank-0 form.

    Evaluates the expression c * A_ij * f_j, where c is a Constant, A_ij is a
    user-specified constant matrix and f_j is the j-th component of a
    user-specified vector-valued finite element function (in a P1 space).
    """
    e = basix.ufl.element("P", "triangle", 1, shape=(2, ))
    mesh = ufl.Mesh(e)
    V = ufl.FunctionSpace(mesh, e)
    f = ufl.Coefficient(V)

    a_mat = np.array([[1.0, 2.0], [1.0, 1.0]])
    a = ufl.as_matrix(a_mat)
    expr = ufl.Constant(mesh) * ufl.dot(a, f)

    points = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
    obj, module, code = ffcx.codegeneration.jit.compile_expressions(
        [(expr, points)], cffi_extra_compile_args=compile_args)

    ffi = cffi.FFI()
    expression = obj[0]

    c_type, np_type = float_to_type("double")
    A = np.zeros((3, 2), dtype=np_type)
    f_mat = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])

    # Coefficient storage XYXYXY
    w = np.array(f_mat.T.flatten(), dtype=np_type)
    c = np.array([0.5], dtype=np_type)
    entity_index = np.array([0], dtype=np.intc)
    quad_perm = np.array([0], dtype=np.dtype("uint8"))

    geom_type = scalar_to_value_type(c_type)
    np_gtype = cdtype_to_numpy(geom_type)

    # Coords storage XYZXYZXYZ
    coords = np.array([[0.0, 0.0, 0.0],
                       [1.0, 0.0, 0.0],
                       [0.0, 1.0, 0.0]], dtype=np_gtype)
    expression.tabulate_tensor_float64(
        ffi.cast('{type} *'.format(type=c_type), A.ctypes.data),
        ffi.cast('{type} *'.format(type=c_type), w.ctypes.data),
        ffi.cast('{type} *'.format(type=c_type), c.ctypes.data),
        ffi.cast(f'{geom_type} *', coords.ctypes.data),
        ffi.cast('int *', entity_index.ctypes.data),
        ffi.cast('uint8_t *', quad_perm.ctypes.data))

    # Check the computation against the correct NumPy value
    assert np.allclose(A, 0.5 * np.dot(a_mat, f_mat).T)

    # Prepare NumPy array of points attached to the expression
    length = expression.num_points * expression.topological_dimension
    points_kernel = np.frombuffer(
        ffi.buffer(expression.points, length * ffi.sizeof("double")), np.double)
    points_kernel = points_kernel.reshape(points.shape)
    assert np.allclose(points, points_kernel)

    # Check the value shape attached to the expression
    value_shape = np.frombuffer(
        ffi.buffer(expression.value_shape, expression.num_components * ffi.sizeof("int")),
        np.intc)
    assert np.allclose(expr.ufl_shape, value_shape)
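
# Illustrative note on the coefficient layout used above (a sketch, not part
# of the suite): for a P1 vector-valued coefficient with nodal values
# f_mat = [[fx0, fx1, fx2], [fy0, fy1, fy2]], the kernel expects the
# interleaved "XYXYXY" ordering, which is exactly what f_mat.T.flatten()
# produces:
#
#   w = [fx0, fy0, fx1, fy1, fx2, fy2]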
""" e = basix.ufl.element("P", "triangle", 1, shape=(2, )) mesh = ufl.Mesh(e) V = ufl.FunctionSpace(mesh, e) u = ufl.TrialFunction(V) expr = ufl.as_vector([u[1], u[0]]) + ufl.grad(u[0]) points = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]]) obj, module, code = ffcx.codegeneration.jit.compile_expressions( [(expr, points)], cffi_extra_compile_args=compile_args) ffi = cffi.FFI() expression = obj[0] c_type, np_type = float_to_type("double") geom_type = scalar_to_value_type(c_type) np_gtype = cdtype_to_numpy(geom_type) # 2 components for vector components of expression # 3 points of evaluation # 6 degrees of freedom for rank1 form A = np.zeros((3, 2, 6), dtype=np_type) # Coefficient storage XYXYXY w = np.array([0.0], dtype=np_type) c = np.array([0.0], dtype=np_type) entity_index = np.array([0], dtype=np.intc) quad_perm = np.array([0], dtype=np.dtype("uint8")) # Coords storage XYZXYZXYZ coords = np.zeros((points.shape[0], 3), dtype=np_gtype) coords[:, :2] = points expression.tabulate_tensor_float64( ffi.cast('{type} *'.format(type=c_type), A.ctypes.data), ffi.cast('{type} *'.format(type=c_type), w.ctypes.data), ffi.cast('{type} *'.format(type=c_type), c.ctypes.data), ffi.cast(f'{geom_type} *', coords.ctypes.data), ffi.cast('int *', entity_index.ctypes.data), ffi.cast('uint8_t *', quad_perm.ctypes.data)) f = np.array([[1.0, 2.0, 3.0], [-4.0, -5.0, 6.0]]) # Apply the operator on some test input data u_ffcx = np.einsum("ijk,k", A, f.T.flatten()) # Compute the correct values using NumPy # Gradf0 is gradient of f[0], each component of the gradient is constant gradf0 = np.array([[f[0, 1] - f[0, 0], f[0, 1] - f[0, 0], f[0, 1] - f[0, 0]], [f[0, 2] - f[0, 0], f[0, 2] - f[0, 0], f[0, 2] - f[0, 0]]]) u_correct = np.array([f[1], f[0]]) + gradf0 assert np.allclose(u_ffcx, u_correct.T) def test_elimiate_zero_tables_tensor(compile_args): """ Test elimination of tensor-valued expressions with zero tables """ cell = "tetrahedron" c_el = basix.ufl.element("P", cell, 1, shape=(3, )) mesh = ufl.Mesh(c_el) e = basix.ufl.element("P", cell, 1) V = ufl.FunctionSpace(mesh, e) u = ufl.Coefficient(V) expr = ufl.sym(ufl.as_tensor([[u, u.dx(0).dx(0), 0], [u.dx(1), u.dx(1), 0], [0, 0, 0]])) # Get vertices of cell # Coords storage XYZXYZXYZ basix_c_e = basix.create_element(basix.ElementFamily.P, basix.cell.string_to_type(cell), 1, discontinuous=False) coords = basix_c_e.points # Using same basix element for coordinate element and coefficient coeff_points = basix_c_e.points # Compile expression at interpolation points of second order Lagrange space b_el = basix.create_element(basix.ElementFamily.P, basix.cell.string_to_type(cell), 0, discontinuous=True) points = b_el.points obj, module, code = ffcx.codegeneration.jit.compile_expressions( [(expr, points)], cffi_extra_compile_args=compile_args) ffi = cffi.FFI() expression = obj[0] c_type, np_type = float_to_type("double") output = np.zeros(9 * points.shape[0], dtype=np_type) # Define coefficients for u = x + 2 * y u_coeffs = u_coeffs = coeff_points.T[0] + 2 * coeff_points.T[1] consts = np.array([], dtype=np_type) entity_index = np.array([0], dtype=np.intc) quad_perm = np.array([0], dtype=np.dtype("uint8")) expression.tabulate_tensor_float64( ffi.cast('{type} *'.format(type=c_type), output.ctypes.data), ffi.cast('{type} *'.format(type=c_type), u_coeffs.ctypes.data), ffi.cast('{type} *'.format(type=c_type), consts.ctypes.data), ffi.cast('double *', coords.ctypes.data), ffi.cast('int *', entity_index.ctypes.data), ffi.cast('uint8_t *', quad_perm.ctypes.data)) def exact_expr(x): 
val = np.zeros((9, x.shape[1]), dtype=np_type) val[0] = x[0] + 2 * x[1] val[1] = 0 + 0.5 * 2 val[3] = 0.5 * 2 + 0 val[4] = 2 return val.T exact = exact_expr(points.T) assert np.allclose(exact, output) ffcx-0.7.0/test/test_jit_forms.py000066400000000000000000001101521450721277100170360ustar00rootroot00000000000000# Copyright (C) 2018-2020 Garth N. Wells & Matthew Scroggs # # This file is part of FFCx. (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later import numpy as np import pytest import sympy from sympy.abc import x, y, z import basix.ufl import ffcx.codegeneration.jit import ufl from ffcx.codegeneration.utils import cdtype_to_numpy, scalar_to_value_type @pytest.mark.parametrize("mode,expected_result", [ ("double", np.array([[1.0, -0.5, -0.5], [-0.5, 0.5, 0.0], [-0.5, 0.0, 0.5]], dtype=np.float64)), ("double _Complex", np.array( [[1.0 + 0j, -0.5 + 0j, -0.5 + 0j], [-0.5 + 0j, 0.5 + 0j, 0.0 + 0j], [-0.5 + 0j, 0.0 + 0j, 0.5 + 0j]], dtype=np.complex128)), ]) def test_laplace_bilinear_form_2d(mode, expected_result, compile_args): element = basix.ufl.element("Lagrange", "triangle", 1) domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2, ))) space = ufl.FunctionSpace(domain, element) kappa = ufl.Constant(domain, shape=(2, 2)) u, v = ufl.TrialFunction(space), ufl.TestFunction(space) a = ufl.tr(kappa) * ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx forms = [a] compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms( forms, options={'scalar_type': mode}, cffi_extra_compile_args=compile_args) for f, compiled_f in zip(forms, compiled_forms): assert compiled_f.rank == len(f.arguments()) ffi = module.ffi form0 = compiled_forms[0] offsets = form0.form_integral_offsets cell = module.lib.cell assert offsets[cell + 1] - offsets[cell] == 1 integral_id = form0.form_integral_ids[offsets[cell]] assert integral_id == -1 default_integral = form0.form_integrals[offsets[cell]] np_type = cdtype_to_numpy(mode) A = np.zeros((3, 3), dtype=np_type) w = np.array([], dtype=np_type) kappa_value = np.array([[1.0, 2.0], [3.0, 4.0]]) c = np.array(kappa_value.flatten(), dtype=np_type) geom_type = scalar_to_value_type(mode) np_gtype = cdtype_to_numpy(geom_type) coords = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], dtype=np_gtype) kernel = getattr(default_integral, f"tabulate_tensor_{np_type}") kernel(ffi.cast('{type} *'.format(type=mode), A.ctypes.data), ffi.cast('{type} *'.format(type=mode), w.ctypes.data), ffi.cast('{type} *'.format(type=mode), c.ctypes.data), ffi.cast(f'{geom_type} *', coords.ctypes.data), ffi.NULL, ffi.NULL) assert np.allclose(A, np.trace(kappa_value) * expected_result) @pytest.mark.parametrize("mode,expected_result", [ ("float", np.array( [[1.0 / 12.0, 1.0 / 24.0, 1.0 / 24.0], [1.0 / 24.0, 1.0 / 12.0, 1.0 / 24.0], [1.0 / 24.0, 1.0 / 24.0, 1.0 / 12.0]], dtype=np.float32)), ("long double", np.array( [[1.0 / 12.0, 1.0 / 24.0, 1.0 / 24.0], [1.0 / 24.0, 1.0 / 12.0, 1.0 / 24.0], [1.0 / 24.0, 1.0 / 24.0, 1.0 / 12.0]], dtype=np.longdouble)), ("double", np.array( [[1.0 / 12.0, 1.0 / 24.0, 1.0 / 24.0], [1.0 / 24.0, 1.0 / 12.0, 1.0 / 24.0], [1.0 / 24.0, 1.0 / 24.0, 1.0 / 12.0]], dtype=np.float64)), ("double _Complex", np.array( [[1.0 / 12.0, 1.0 / 24.0, 1.0 / 24.0], [1.0 / 24.0, 1.0 / 12.0, 1.0 / 24.0], [1.0 / 24.0, 1.0 / 24.0, 1.0 / 12.0]], dtype=np.complex128)), ("float _Complex", np.array( [[1.0 / 12.0, 1.0 / 24.0, 1.0 / 24.0], [1.0 / 24.0, 1.0 / 12.0, 1.0 / 24.0], [1.0 / 24.0, 1.0 / 24.0, 1.0 / 12.0]], dtype=np.complex64)), ]) def 
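
# Illustrative cross-check of the expected reference stiffness matrix above
# (a sketch, not part of the suite): for P1 on the reference triangle with
# vertices (0,0), (1,0), (0,1) the basis gradients are constant, so
#
#   import numpy as np
#   grads = np.array([[-1.0, -1.0], [1.0, 0.0], [0.0, 1.0]])
#   K = 0.5 * grads @ grads.T   # the cell area is 1/2
#
# reproduces [[1, -0.5, -0.5], [-0.5, 0.5, 0], [-0.5, 0, 0.5]].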

@pytest.mark.parametrize("mode,expected_result", [
    ("float",
     np.array([[1.0 / 12.0, 1.0 / 24.0, 1.0 / 24.0],
               [1.0 / 24.0, 1.0 / 12.0, 1.0 / 24.0],
               [1.0 / 24.0, 1.0 / 24.0, 1.0 / 12.0]], dtype=np.float32)),
    ("long double",
     np.array([[1.0 / 12.0, 1.0 / 24.0, 1.0 / 24.0],
               [1.0 / 24.0, 1.0 / 12.0, 1.0 / 24.0],
               [1.0 / 24.0, 1.0 / 24.0, 1.0 / 12.0]], dtype=np.longdouble)),
    ("double",
     np.array([[1.0 / 12.0, 1.0 / 24.0, 1.0 / 24.0],
               [1.0 / 24.0, 1.0 / 12.0, 1.0 / 24.0],
               [1.0 / 24.0, 1.0 / 24.0, 1.0 / 12.0]], dtype=np.float64)),
    ("double _Complex",
     np.array([[1.0 / 12.0, 1.0 / 24.0, 1.0 / 24.0],
               [1.0 / 24.0, 1.0 / 12.0, 1.0 / 24.0],
               [1.0 / 24.0, 1.0 / 24.0, 1.0 / 12.0]], dtype=np.complex128)),
    ("float _Complex",
     np.array([[1.0 / 12.0, 1.0 / 24.0, 1.0 / 24.0],
               [1.0 / 24.0, 1.0 / 12.0, 1.0 / 24.0],
               [1.0 / 24.0, 1.0 / 24.0, 1.0 / 12.0]], dtype=np.complex64)),
])
def test_mass_bilinear_form_2d(mode, expected_result, compile_args):
    element = basix.ufl.element("Lagrange", "triangle", 1)
    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2, )))
    space = ufl.FunctionSpace(domain, element)
    u, v = ufl.TrialFunction(space), ufl.TestFunction(space)
    a = ufl.inner(u, v) * ufl.dx
    L = ufl.conj(v) * ufl.dx

    forms = [a, L]
    compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(
        forms, options={'scalar_type': mode}, cffi_extra_compile_args=compile_args)

    for f, compiled_f in zip(forms, compiled_forms):
        assert compiled_f.rank == len(f.arguments())

    form0 = compiled_forms[0].form_integrals[0]
    form1 = compiled_forms[1].form_integrals[0]

    np_type = cdtype_to_numpy(mode)
    A = np.zeros((3, 3), dtype=np_type)
    w = np.array([], dtype=np_type)
    c = np.array([], dtype=np_type)

    geom_type = scalar_to_value_type(mode)
    np_gtype = cdtype_to_numpy(geom_type)

    ffi = module.ffi
    coords = np.array([[0.0, 0.0, 0.0],
                       [1.0, 0.0, 0.0],
                       [0.0, 1.0, 0.0]], dtype=np_gtype)

    kernel0 = ffi.cast(f"ufcx_tabulate_tensor_{np_type} *",
                       getattr(form0, f"tabulate_tensor_{np_type}"))
    kernel0(ffi.cast('{type} *'.format(type=mode), A.ctypes.data),
            ffi.cast('{type} *'.format(type=mode), w.ctypes.data),
            ffi.cast('{type} *'.format(type=mode), c.ctypes.data),
            ffi.cast(f'{geom_type} *', coords.ctypes.data), ffi.NULL, ffi.NULL)

    b = np.zeros(3, dtype=np_type)
    kernel1 = ffi.cast(f"ufcx_tabulate_tensor_{np_type} *",
                       getattr(form1, f"tabulate_tensor_{np_type}"))
    kernel1(ffi.cast('{type} *'.format(type=mode), b.ctypes.data),
            ffi.cast('{type} *'.format(type=mode), w.ctypes.data),
            ffi.cast('{type} *'.format(type=mode), c.ctypes.data),
            ffi.cast(f'{geom_type} *', coords.ctypes.data), ffi.NULL, ffi.NULL)

    assert np.allclose(A, expected_result)
    assert np.allclose(b, 1.0 / 6.0)
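
# Side note (a sketch, not part of the suite): the expected values above are
# the classical exact P1 mass matrix on a triangle of area T,
#
#   M = T / 12 * [[2, 1, 1], [1, 2, 1], [1, 1, 2]]
#
# which with T = 1/2 gives 1/12 on the diagonal and 1/24 off it, and the load
# vector of the constant-one linear form is T/3 = 1/6 per dof.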

@pytest.mark.parametrize("mode,expected_result", [
    ("double",
     np.array([[1.0, -0.5, -0.5], [-0.5, 0.5, 0.0], [-0.5, 0.0, 0.5]],
              dtype=np.float64)
     - (1.0 / 24.0) * np.array([[2, 1, 1], [1, 2, 1], [1, 1, 2]], dtype=np.float64)),
    ("double _Complex",
     np.array([[1.0, -0.5, -0.5], [-0.5, 0.5, 0.0], [-0.5, 0.0, 0.5]],
              dtype=np.complex128)
     - (1.0j / 24.0) * np.array([[2, 1, 1], [1, 2, 1], [1, 1, 2]], dtype=np.complex128)),
])
def test_helmholtz_form_2d(mode, expected_result, compile_args):
    element = basix.ufl.element("Lagrange", "triangle", 1)
    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2, )))
    space = ufl.FunctionSpace(domain, element)
    u, v = ufl.TrialFunction(space), ufl.TestFunction(space)
    if mode == "double":
        k = 1.0
    elif mode == "double _Complex":
        k = ufl.constantvalue.ComplexValue(1j)
    else:
        raise RuntimeError("Unknown mode type")

    a = (ufl.inner(ufl.grad(u), ufl.grad(v)) - ufl.inner(k * u, v)) * ufl.dx
    forms = [a]
    compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(
        forms, options={'scalar_type': mode}, cffi_extra_compile_args=compile_args)

    for f, compiled_f in zip(forms, compiled_forms):
        assert compiled_f.rank == len(f.arguments())

    form0 = compiled_forms[0].form_integrals[0]

    np_type = cdtype_to_numpy(mode)
    A = np.zeros((3, 3), dtype=np_type)
    w = np.array([], dtype=np_type)
    c = np.array([], dtype=np_type)

    geom_type = scalar_to_value_type(mode)
    np_gtype = cdtype_to_numpy(geom_type)

    ffi = module.ffi
    coords = np.array([[0.0, 0.0, 0.0],
                       [1.0, 0.0, 0.0],
                       [0.0, 1.0, 0.0]], dtype=np_gtype)
    kernel = getattr(form0, f"tabulate_tensor_{np_type}")
    kernel(ffi.cast('{type} *'.format(type=mode), A.ctypes.data),
           ffi.cast('{type} *'.format(type=mode), w.ctypes.data),
           ffi.cast('{type} *'.format(type=mode), c.ctypes.data),
           ffi.cast(f'{geom_type} *', coords.ctypes.data), ffi.NULL, ffi.NULL)

    assert np.allclose(A, expected_result)


@pytest.mark.parametrize("mode,expected_result", [
    ("double",
     np.array([[0.5, -1 / 6, -1 / 6, -1 / 6],
               [-1 / 6, 1 / 6, 0.0, 0.0],
               [-1 / 6, 0.0, 1 / 6, 0.0],
               [-1 / 6, 0.0, 0.0, 1 / 6]], dtype=np.float64)),
    ("double _Complex",
     np.array([[0.5 + 0j, -1 / 6 + 0j, -1 / 6 + 0j, -1 / 6 + 0j],
               [-1 / 6 + 0j, 1 / 6 + 0j, 0.0 + 0j, 0.0 + 0j],
               [-1 / 6 + 0j, 0.0 + 0j, 1 / 6 + 0j, 0.0 + 0j],
               [-1 / 6 + 0j, 0.0 + 0j, 0.0 + 0j, 1 / 6 + 0j]], dtype=np.complex128)),
])
def test_laplace_bilinear_form_3d(mode, expected_result, compile_args):
    element = basix.ufl.element("Lagrange", "tetrahedron", 1)
    domain = ufl.Mesh(basix.ufl.element("Lagrange", "tetrahedron", 1, shape=(3, )))
    space = ufl.FunctionSpace(domain, element)
    u, v = ufl.TrialFunction(space), ufl.TestFunction(space)
    a = ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx

    forms = [a]
    compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(
        forms, options={'scalar_type': mode}, cffi_extra_compile_args=compile_args)

    for f, compiled_f in zip(forms, compiled_forms):
        assert compiled_f.rank == len(f.arguments())

    form0 = compiled_forms[0].form_integrals[0]

    np_type = cdtype_to_numpy(mode)
    A = np.zeros((4, 4), dtype=np_type)
    w = np.array([], dtype=np_type)
    c = np.array([], dtype=np_type)

    geom_type = scalar_to_value_type(mode)
    np_gtype = cdtype_to_numpy(geom_type)

    ffi = module.ffi
    coords = np.array([0.0, 0.0, 0.0,
                       1.0, 0.0, 0.0,
                       0.0, 1.0, 0.0,
                       0.0, 0.0, 1.0], dtype=np_gtype)
    kernel = getattr(form0, f"tabulate_tensor_{np_type}")
    kernel(ffi.cast('{type} *'.format(type=mode), A.ctypes.data),
           ffi.cast('{type} *'.format(type=mode), w.ctypes.data),
           ffi.cast('{type} *'.format(type=mode), c.ctypes.data),
           ffi.cast(f'{geom_type} *', coords.ctypes.data), ffi.NULL, ffi.NULL)

    assert np.allclose(A, expected_result)


def test_form_coefficient(compile_args):
    element = basix.ufl.element("Lagrange", "triangle", 1)
    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2, )))
    space = ufl.FunctionSpace(domain, element)
    u, v = ufl.TestFunction(space), ufl.TrialFunction(space)
    g = ufl.Coefficient(space)
    a = g * ufl.inner(u, v) * ufl.dx
    forms = [a]
    compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(
        forms, cffi_extra_compile_args=compile_args)

    for f, compiled_f in zip(forms, compiled_forms):
        assert compiled_f.rank == len(f.arguments())

    form0 = compiled_forms[0].form_integrals[0]
    A = np.zeros((3, 3), dtype=np.float64)
    w = np.array([1.0, 1.0, 1.0], dtype=np.float64)
    c = np.array([], dtype=np.float64)
    perm = np.array([0], dtype=np.uint8)

    ffi = module.ffi
    coords = np.array([[0.0, 0.0, 0.0],
                       [1.0, 0.0, 0.0],
                       [0.0, 1.0, 0.0]], dtype=np.float64)
    kernel = getattr(form0, "tabulate_tensor_float64")
    kernel(ffi.cast('double *', A.ctypes.data), ffi.cast('double *', w.ctypes.data),
           ffi.cast('double *', c.ctypes.data), ffi.cast('double *', coords.ctypes.data),
           ffi.NULL, ffi.cast('uint8_t *', perm.ctypes.data))

    A_analytic = np.array([[2, 1, 1], [1, 2, 1], [1, 1, 2]], dtype=np.float64) / 24.0
    A_diff = A - A_analytic
    assert np.isclose(A_diff.max(), 0.0)
    assert np.isclose(A_diff.min(), 0.0)

def test_subdomains(compile_args):
    element = basix.ufl.element("Lagrange", "triangle", 1)
    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2, )))
    space = ufl.FunctionSpace(domain, element)
    u, v = ufl.TrialFunction(space), ufl.TestFunction(space)
    a0 = ufl.inner(u, v) * ufl.dx + ufl.inner(u, v) * ufl.dx(2)
    a1 = ufl.inner(u, v) * ufl.dx(2) + ufl.inner(u, v) * ufl.dx
    a2 = ufl.inner(u, v) * ufl.dx(2) + ufl.inner(u, v) * ufl.dx(1)
    a3 = ufl.inner(u, v) * ufl.ds(210) + ufl.inner(u, v) * ufl.ds(0)
    forms = [a0, a1, a2, a3]
    compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(
        forms, options={'scalar_type': 'double'}, cffi_extra_compile_args=compile_args)

    for f, compiled_f in zip(forms, compiled_forms):
        assert compiled_f.rank == len(f.arguments())

    form0 = compiled_forms[0]
    offsets = form0.form_integral_offsets
    cell = module.lib.cell
    ids = [form0.form_integral_ids[j] for j in range(offsets[cell], offsets[cell + 1])]
    assert ids[0] == -1 and ids[1] == 2

    form1 = compiled_forms[1]
    offsets = form1.form_integral_offsets
    ids = [form1.form_integral_ids[j] for j in range(offsets[cell], offsets[cell + 1])]
    assert ids[0] == -1 and ids[1] == 2

    form2 = compiled_forms[2]
    offsets = form2.form_integral_offsets
    ids = [form2.form_integral_ids[j] for j in range(offsets[cell], offsets[cell + 1])]
    assert ids[0] == 1 and ids[1] == 2

    form3 = compiled_forms[3]
    offsets = form3.form_integral_offsets
    assert offsets[cell + 1] - offsets[cell] == 0
    exf = module.lib.exterior_facet
    ids = [form3.form_integral_ids[j] for j in range(offsets[exf], offsets[exf + 1])]
    assert ids[0] == 0 and ids[1] == 210
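
# How the offsets above are read (an illustrative note): form_integral_offsets
# is indexed by integral type (module.lib.cell, module.lib.exterior_facet,
# module.lib.interior_facet), and the integrals of type t occupy the half-open
# range [offsets[t], offsets[t + 1]) of form_integrals and form_integral_ids,
# with -1 denoting the default ("everywhere") integral; in the asserts above
# the subdomain ids appear in ascending order.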

@pytest.mark.parametrize("mode", ["double", "double _Complex"])
def test_interior_facet_integral(mode, compile_args):
    element = basix.ufl.element("Lagrange", "triangle", 1)
    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2, )))
    space = ufl.FunctionSpace(domain, element)
    u, v = ufl.TrialFunction(space), ufl.TestFunction(space)
    a0 = ufl.inner(ufl.jump(ufl.grad(u)), ufl.jump(ufl.grad(v))) * ufl.dS
    forms = [a0]
    compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(
        forms, options={'scalar_type': mode}, cffi_extra_compile_args=compile_args)

    for f, compiled_f in zip(forms, compiled_forms):
        assert compiled_f.rank == len(f.arguments())

    ffi = module.ffi
    form0 = compiled_forms[0]

    np_type = cdtype_to_numpy(mode)
    integral0 = form0.form_integrals[0]
    A = np.zeros((6, 6), dtype=np_type)
    w = np.array([], dtype=np_type)
    c = np.array([], dtype=np.float64)

    facets = np.array([0, 2], dtype=np.intc)
    perms = np.array([0, 1], dtype=np.uint8)

    geom_type = scalar_to_value_type(mode)
    np_gtype = cdtype_to_numpy(geom_type)
    coords = np.array([[0.0, 0.0, 0.0,
                        1.0, 0.0, 0.0,
                        0.0, 1.0, 0.0],
                       [1.0, 0.0, 0.0,
                        0.0, 1.0, 0.0,
                        1.0, 1.0, 0.0]], dtype=np_gtype)

    kernel = getattr(integral0, f"tabulate_tensor_{np_type}")
    kernel(ffi.cast(f'{mode} *', A.ctypes.data),
           ffi.cast(f'{mode} *', w.ctypes.data),
           ffi.cast(f'{mode} *', c.ctypes.data),
           ffi.cast(f'{geom_type} *', coords.ctypes.data),
           ffi.cast('int *', facets.ctypes.data),
           ffi.cast('uint8_t *', perms.ctypes.data))


@pytest.mark.parametrize("mode", ["double", "double _Complex"])
def test_conditional(mode, compile_args):
    element = basix.ufl.element("Lagrange", "triangle", 1)
    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2, )))
    space = ufl.FunctionSpace(domain, element)
    u, v = ufl.TrialFunction(space), ufl.TestFunction(space)
    x = ufl.SpatialCoordinate(domain)
    condition = ufl.Or(ufl.ge(ufl.real(x[0] + x[1]), 0.1),
                       ufl.ge(ufl.real(x[1] + x[1]**2), 0.1))
    c1 = ufl.conditional(condition, 2.0, 1.0)
    a = c1 * ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx

    x1x2 = ufl.real(x[0] + ufl.as_ufl(2) * x[1])
    c2 = ufl.conditional(ufl.ge(x1x2, 0), 6.0, 0.0)
    b = c2 * ufl.conj(v) * ufl.dx

    forms = [a, b]
    compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(
        forms, options={'scalar_type': mode}, cffi_extra_compile_args=compile_args)

    form0 = compiled_forms[0].form_integrals[0]
    form1 = compiled_forms[1].form_integrals[0]

    ffi = module.ffi
    np_type = cdtype_to_numpy(mode)

    A1 = np.zeros((3, 3), dtype=np_type)
    w1 = np.array([1.0, 1.0, 1.0], dtype=np_type)
    c = np.array([], dtype=np.float64)

    geom_type = scalar_to_value_type(mode)
    np_gtype = cdtype_to_numpy(geom_type)
    coords = np.array([[0.0, 0.0, 0.0],
                       [1.0, 0.0, 0.0],
                       [0.0, 1.0, 0.0]], dtype=np_gtype)

    kernel0 = ffi.cast(f"ufcx_tabulate_tensor_{np_type} *",
                       getattr(form0, f"tabulate_tensor_{np_type}"))
    kernel0(ffi.cast('{type} *'.format(type=mode), A1.ctypes.data),
            ffi.cast('{type} *'.format(type=mode), w1.ctypes.data),
            ffi.cast('{type} *'.format(type=mode), c.ctypes.data),
            ffi.cast(f'{geom_type} *', coords.ctypes.data), ffi.NULL, ffi.NULL)

    expected_result = np.array([[2, -1, -1], [-1, 1, 0], [-1, 0, 1]], dtype=np_type)
    assert np.allclose(A1, expected_result)

    A2 = np.zeros(3, dtype=np_type)
    w2 = np.array([1.0, 1.0, 1.0], dtype=np_type)

    kernel1 = ffi.cast(f"ufcx_tabulate_tensor_{np_type} *",
                       getattr(form1, f"tabulate_tensor_{np_type}"))
    kernel1(ffi.cast('{type} *'.format(type=mode), A2.ctypes.data),
            ffi.cast('{type} *'.format(type=mode), w2.ctypes.data),
            ffi.cast('{type} *'.format(type=mode), c.ctypes.data),
            ffi.cast(f'{geom_type} *', coords.ctypes.data), ffi.NULL, ffi.NULL)

    expected_result = np.ones(3, dtype=np_type)
    assert np.allclose(A2, expected_result)


def test_custom_quadrature(compile_args):
    ve = basix.ufl.element("P", "triangle", 1, shape=(2, ))
    mesh = ufl.Mesh(ve)

    e = basix.ufl.element("P", mesh.ufl_cell().cellname(), 2)
    V = ufl.FunctionSpace(mesh, e)
    u, v = ufl.TrialFunction(V), ufl.TestFunction(V)

    points = [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.5, 0.5], [0.0, 0.5], [0.5, 0.0]]
    weights = [1 / 12] * 6
    a = u * v * ufl.dx(metadata={"quadrature_rule": "custom",
                                 "quadrature_points": points,
                                 "quadrature_weights": weights})

    forms = [a]
    compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(
        forms, cffi_extra_compile_args=compile_args)

    ffi = module.ffi
    form = compiled_forms[0]
    default_integral = form.form_integrals[0]

    A = np.zeros((6, 6), dtype=np.float64)
    w = np.array([], dtype=np.float64)
    c = np.array([], dtype=np.float64)
    coords = np.array([[0.0, 0.0, 0.0],
                       [1.0, 0.0, 0.0],
                       [0.0, 1.0, 0.0]], dtype=np.float64)

    kernel = getattr(default_integral, "tabulate_tensor_float64")
    kernel(ffi.cast("double *", A.ctypes.data), ffi.cast("double *", w.ctypes.data),
           ffi.cast("double *", c.ctypes.data), ffi.cast("double *", coords.ctypes.data),
           ffi.NULL, ffi.NULL)

    # Check that A is diagonal
    assert np.count_nonzero(A - np.diag(np.diagonal(A))) == 0


def test_curl_curl(compile_args):
    V = basix.ufl.element("N1curl", "triangle", 2)
    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2, )))
    space = ufl.FunctionSpace(domain, V)
    u, v = ufl.TrialFunction(space), ufl.TestFunction(space)
    a = ufl.inner(ufl.curl(u), ufl.curl(v)) * ufl.dx

    forms = [a]
    compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(
        forms, cffi_extra_compile_args=compile_args)

def lagrange_triangle_symbolic(order, corners=[(1, 0), (2, 0), (0, 1)], fun=lambda i: i):
    from sympy import S
    poly_basis = [x**i * y**j for i in range(order + 1) for j in range(order + 1 - i)]
    # vertices
    eval_points = [S(c) for c in corners]
    # edges
    for e in [(1, 2), (0, 2), (0, 1)]:
        p0 = corners[e[0]]
        p1 = corners[e[1]]
        if order > 3:
            raise NotImplementedError
        elif order == 3:
            eval_points += [tuple(S(a) + (b - a) * i for a, b in zip(p0, p1))
                            for i in [(1 - 1 / sympy.sqrt(5)) / 2,
                                      (1 + 1 / sympy.sqrt(5)) / 2]]
        else:
            eval_points += [tuple(S(a) + sympy.Rational((b - a) * i, order)
                                  for a, b in zip(p0, p1))
                            for i in range(1, order)]
    # face
    for f in [(0, 1, 2)]:
        p0 = corners[f[0]]
        p1 = corners[f[1]]
        p2 = corners[f[2]]
        eval_points += [tuple(S(a) + sympy.Rational((b - a) * i, order)
                              + sympy.Rational((c - a) * j, order)
                              for a, b, c in zip(p0, p1, p2))
                        for i in range(1, order) for j in range(1, order - i)]

    dual_mat = [[f.subs(x, p[0]).subs(y, p[1]) for p in eval_points] for f in poly_basis]
    dual_mat = sympy.Matrix(dual_mat)
    mat = dual_mat.inv()
    functions = [sum(i * j for i, j in zip(mat.row(k), poly_basis)) for k in range(mat.rows)]
    results = []
    for f in functions:
        integrand = fun(f)
        results.append(integrand.integrate((x, 1 - y, 2 - 2 * y), (y, 0, 1)))
    return results


@pytest.mark.parametrize("mode", ["double"])
@pytest.mark.parametrize("sym_fun,ufl_fun", [
    (lambda i: i, lambda i: i),
    (lambda i: i.diff(x), lambda i: ufl.grad(i)[0]),
    (lambda i: i.diff(y), lambda i: ufl.grad(i)[1])])
@pytest.mark.parametrize("order", [1, 2, 3])
def test_lagrange_triangle(compile_args, order, mode, sym_fun, ufl_fun):
    sym = lagrange_triangle_symbolic(order, fun=sym_fun)
    element = basix.ufl.element("Lagrange", "triangle", order)
    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2, )))
    space = ufl.FunctionSpace(domain, element)
    v = ufl.TestFunction(space)

    a = ufl_fun(v) * ufl.dx
    forms = [a]
    compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(
        forms, options={'scalar_type': mode}, cffi_extra_compile_args=compile_args)

    ffi = module.ffi
    form0 = compiled_forms[0]
    assert form0.form_integral_offsets[module.lib.cell + 1] == 1
    default_integral = form0.form_integrals[0]

    np_type = cdtype_to_numpy(mode)
    b = np.zeros((order + 2) * (order + 1) // 2, dtype=np_type)
    w = np.array([], dtype=np_type)

    geom_type = scalar_to_value_type(mode)
    np_gtype = cdtype_to_numpy(geom_type)
    coords = np.array([[1.0, 0.0, 0.0],
                       [2.0, 0.0, 0.0],
                       [0.0, 1.0, 0.0]], dtype=np_gtype)

    kernel = getattr(default_integral, f"tabulate_tensor_{np_type}")
    kernel(ffi.cast('{type} *'.format(type=mode), b.ctypes.data),
           ffi.cast('{type} *'.format(type=mode), w.ctypes.data),
           ffi.NULL, ffi.cast(f'{geom_type} *', coords.ctypes.data), ffi.NULL, ffi.NULL)

    # Check that the result is the same as for sympy
    assert np.allclose(b, [float(i) for i in sym])
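
# How the symbolic reference works (an illustrative note): with monomials m_j
# and nodal points p_i, the matrix D with D[j][i] = m_j(p_i) is inverted, and
# the k-th nodal basis function is sum_j inv(D)[k][j] * m_j. By construction
# it evaluates to 1 at p_k and 0 at every other nodal point, so integrating
# fun(phi_k) over the mapped cell gives an exact reference value to compare
# against the generated tabulate_tensor kernel.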

def lagrange_tetrahedron_symbolic(order, corners=[(1, 0, 0), (2, 0, 0), (0, 1, 0), (0, 0, 1)],
                                  fun=lambda i: i):
    from sympy import S
    poly_basis = [x**i * y**j * z**k for i in range(order + 1)
                  for j in range(order + 1 - i) for k in range(order + 1 - i - j)]
    # vertices
    eval_points = [S(c) for c in corners]
    # edges
    for e in [(2, 3), (1, 3), (1, 2), (0, 3), (0, 2), (0, 1)]:
        p0 = corners[e[0]]
        p1 = corners[e[1]]
        if order > 3:
            raise NotImplementedError
        elif order == 3:
            eval_points += [tuple(S(a) + (b - a) * i for a, b in zip(p0, p1))
                            for i in [(1 - 1 / sympy.sqrt(5)) / 2,
                                      (1 + 1 / sympy.sqrt(5)) / 2]]
        else:
            eval_points += [tuple(S(a) + sympy.Rational((b - a) * i, order)
                                  for a, b in zip(p0, p1))
                            for i in range(1, order)]
    # faces
    for f in [(1, 2, 3), (0, 2, 3), (0, 1, 3), (0, 1, 2)]:
        p0 = corners[f[0]]
        p1 = corners[f[1]]
        p2 = corners[f[2]]
        eval_points += [tuple(S(a) + sympy.Rational((b - a) * i, order)
                              + sympy.Rational((c - a) * j, order)
                              for a, b, c in zip(p0, p1, p2))
                        for i in range(1, order) for j in range(1, order - i)]
    # interior
    for v in [(0, 1, 2, 3)]:
        p0 = corners[v[0]]
        p1 = corners[v[1]]
        p2 = corners[v[2]]
        p3 = corners[v[3]]
        eval_points += [tuple(S(a) + sympy.Rational((b - a) * i, order)
                              + sympy.Rational((c - a) * j, order)
                              + sympy.Rational((d - a) * k, order)
                              for a, b, c, d in zip(p0, p1, p2, p3))
                        for i in range(1, order) for j in range(1, order - i)
                        for k in range(1, order - i - j)]

    dual_mat = [[f.subs(x, p[0]).subs(y, p[1]).subs(z, p[2]) for p in eval_points]
                for f in poly_basis]
    dual_mat = sympy.Matrix(dual_mat)
    mat = dual_mat.inv()
    functions = [sum(i * j for i, j in zip(mat.row(k), poly_basis)) for k in range(mat.rows)]
    results = []
    for f in functions:
        integrand = fun(f)
        results.append(integrand.integrate((x, 1 - y - z, 2 - 2 * y - 2 * z),
                                           (y, 0, 1 - z), (z, 0, 1)))
    return results


@pytest.mark.parametrize("mode", ["double"])
@pytest.mark.parametrize("sym_fun,ufl_fun", [
    (lambda i: i, lambda i: i),
    (lambda i: i.diff(x), lambda i: ufl.grad(i)[0]),
    (lambda i: i.diff(y), lambda i: ufl.grad(i)[1])])
@pytest.mark.parametrize("order", [1, 2, 3])
def test_lagrange_tetrahedron(compile_args, order, mode, sym_fun, ufl_fun):
    sym = lagrange_tetrahedron_symbolic(order, fun=sym_fun)
    element = basix.ufl.element("Lagrange", "tetrahedron", order)
    domain = ufl.Mesh(basix.ufl.element("Lagrange", "tetrahedron", 1, shape=(3, )))
    space = ufl.FunctionSpace(domain, element)
    v = ufl.TestFunction(space)

    a = ufl_fun(v) * ufl.dx
    forms = [a]
    compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(
        forms, options={'scalar_type': mode}, cffi_extra_compile_args=compile_args)

    ffi = module.ffi
    form0 = compiled_forms[0]
    assert form0.form_integral_offsets[module.lib.cell + 1] == 1
    default_integral = form0.form_integrals[0]

    np_type = cdtype_to_numpy(mode)
    b = np.zeros((order + 3) * (order + 2) * (order + 1) // 6, dtype=np_type)
    w = np.array([], dtype=np_type)

    geom_type = scalar_to_value_type(mode)
    np_gtype = cdtype_to_numpy(geom_type)
    coords = np.array([1.0, 0.0, 0.0,
                       2.0, 0.0, 0.0,
                       0.0, 1.0, 0.0,
                       0.0, 0.0, 1.0], dtype=np_gtype)

    kernel = getattr(default_integral, f"tabulate_tensor_{np_type}")
    kernel(ffi.cast('{type} *'.format(type=mode), b.ctypes.data),
           ffi.cast('{type} *'.format(type=mode), w.ctypes.data),
           ffi.NULL, ffi.cast(f'{geom_type} *', coords.ctypes.data), ffi.NULL, ffi.NULL)

    # Check that the result is the same as for sympy
    assert np.allclose(b, [float(i) for i in sym])


def test_prism(compile_args):
    element = basix.ufl.element("Lagrange", "prism", 1)
    domain = ufl.Mesh(basix.ufl.element("Lagrange", "prism", 1, shape=(3, )))
    space = ufl.FunctionSpace(domain, element)
    v = ufl.TestFunction(space)
    L = v * ufl.dx
    forms = [L]
    compiled_forms, module, _ = ffcx.codegeneration.jit.compile_forms(
        forms, options={'scalar_type': 'double'}, cffi_extra_compile_args=compile_args)

    ffi = module.ffi
    form0 = compiled_forms[0]
    assert form0.form_integral_offsets[module.lib.cell + 1] == 1
    default_integral = form0.form_integrals[0]

    b = np.zeros(6, dtype=np.float64)
    coords = np.array([1.0, 0.0, 0.0,
                       0.0, 1.0, 0.0,
                       0.0, 0.0, 0.0,
                       1.0, 0.0, 1.0,
                       0.0, 1.0, 1.0,
                       0.0, 0.0, 1.0], dtype=np.float64)

    kernel = getattr(default_integral, "tabulate_tensor_float64")
    kernel(ffi.cast('double *', b.ctypes.data), ffi.NULL, ffi.NULL,
           ffi.cast('double *', coords.ctypes.data), ffi.NULL, ffi.NULL)

    assert np.isclose(sum(b), 0.5)

def test_complex_operations(compile_args):
    mode = "double _Complex"
    cell = "triangle"
    c_element = basix.ufl.element("Lagrange", cell, 1, shape=(2, ))
    mesh = ufl.Mesh(c_element)
    element = basix.ufl.element("DG", cell, 0, shape=(2, ))
    V = ufl.FunctionSpace(mesh, element)
    u = ufl.Coefficient(V)
    J1 = ufl.real(u)[0] * ufl.imag(u)[1] * ufl.conj(u)[0] * ufl.dx
    J2 = ufl.real(u[0]) * ufl.imag(u[1]) * ufl.conj(u[0]) * ufl.dx
    forms = [J1, J2]
    compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(
        forms, options={'scalar_type': mode}, cffi_extra_compile_args=compile_args)

    form0 = compiled_forms[0].form_integrals[0]
    form1 = compiled_forms[1].form_integrals[0]

    ffi = module.ffi
    np_type = cdtype_to_numpy(mode)
    w1 = np.array([3 + 5j, 8 - 7j], dtype=np_type)
    c = np.array([], dtype=np_type)

    geom_type = scalar_to_value_type(mode)
    np_gtype = cdtype_to_numpy(geom_type)
    coords = np.array([[0.0, 0.0, 0.0],
                       [1.0, 0.0, 0.0],
                       [0.0, 1.0, 0.0]], dtype=np_gtype)

    J_1 = np.zeros((1), dtype=np_type)
    kernel0 = ffi.cast(f"ufcx_tabulate_tensor_{np_type} *",
                       getattr(form0, f"tabulate_tensor_{np_type}"))
    kernel0(ffi.cast('{type} *'.format(type=mode), J_1.ctypes.data),
            ffi.cast('{type} *'.format(type=mode), w1.ctypes.data),
            ffi.cast('{type} *'.format(type=mode), c.ctypes.data),
            ffi.cast(f'{geom_type} *', coords.ctypes.data), ffi.NULL, ffi.NULL)

    expected_result = np.array(
        [0.5 * np.real(w1[0]) * np.imag(w1[1])
         * (np.real(w1[0]) - 1j * np.imag(w1[0]))], dtype=np_type)
    assert np.allclose(J_1, expected_result)

    J_2 = np.zeros((1), dtype=np_type)
    kernel1 = ffi.cast(f"ufcx_tabulate_tensor_{np_type} *",
                       getattr(form1, f"tabulate_tensor_{np_type}"))
    kernel1(ffi.cast('{type} *'.format(type=mode), J_2.ctypes.data),
            ffi.cast('{type} *'.format(type=mode), w1.ctypes.data),
            ffi.cast('{type} *'.format(type=mode), c.ctypes.data),
            ffi.cast(f'{geom_type} *', coords.ctypes.data), ffi.NULL, ffi.NULL)

    assert np.allclose(J_2, expected_result)
    assert np.allclose(J_1, J_2)


def test_invalid_function_name(compile_args):
    # Monkey patch to force an invalid name
    old_str = ufl.Coefficient.__str__
    ufl.Coefficient.__str__ = lambda self: "invalid function name"

    V = basix.ufl.element("Lagrange", "triangle", 1)
    domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2, )))
    space = ufl.FunctionSpace(domain, V)
    u = ufl.Coefficient(space)
    a = ufl.inner(u, u) * ufl.dx
    forms = [a]
    try:
        compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(
            forms, cffi_extra_compile_args=compile_args)
    except ValueError:
        pass
    except Exception:
        raise RuntimeError("Compilation should fail with ValueError.")

    # Revert the monkey patch for other tests
    ufl.Coefficient.__str__ = old_str


def test_interval_vertex_quadrature(compile_args):
    c_el = basix.ufl.element("Lagrange", "interval", 1, shape=(1, ))
    mesh = ufl.Mesh(c_el)

    x = ufl.SpatialCoordinate(mesh)
    dx = ufl.Measure("dx", metadata={"quadrature_rule": "vertex"})
    b = x[0] * dx

    forms = [b]
    compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(
        forms, cffi_extra_compile_args=compile_args)

    ffi = module.ffi
    form0 = compiled_forms[0]
    assert form0.form_integral_offsets[module.lib.cell + 1] == 1
    default_integral = form0.form_integrals[0]

    J = np.zeros(1, dtype=np.float64)
    a = np.pi
    b = np.exp(1)
    coords = np.array([a, 0.0, 0.0,
                       b, 0.0, 0.0], dtype=np.float64)

    kernel = getattr(default_integral, "tabulate_tensor_float64")
    kernel(ffi.cast('double *', J.ctypes.data), ffi.NULL, ffi.NULL,
           ffi.cast('double *', coords.ctypes.data), ffi.NULL, ffi.NULL)

    # Vertex quadrature on an interval is the trapezoid rule, which is exact
    # for the linear integrand x
    assert np.isclose(J[0], (0.5 * a + 0.5 * b) * np.abs(b - a))
"quadrature_rule": "custom", "quadrature_points": np.array([[0.0], [1.0]]), "quadrature_weights": np.array([1.0 / 2.0, 1.0 / 2.0]), } ) b2 = expr * ds_c forms = [b1, b2] compiled_forms, module, _ = ffcx.codegeneration.jit.compile_forms( forms, cffi_extra_compile_args=compile_args) ffi = module.ffi assert len(compiled_forms) == 2 solutions = [] for form in compiled_forms: offsets = form.form_integral_offsets exf = module.lib.exterior_facet assert offsets[exf + 1] - offsets[exf] == 1 default_integral = form.form_integrals[offsets[exf]] J = np.zeros(1, dtype=np.float64) a = np.pi b = np.exp(1) coords = np.array([a, 0.1, 0.0, a + b, 0.0, 0.0, a, a, 0., a + 2 * b, a, 0.], dtype=np.float64) # First facet is between vertex 0 and 1 in coords facets = np.array([0], dtype=np.intc) kernel = getattr(default_integral, "tabulate_tensor_float64") kernel(ffi.cast('double *', J.ctypes.data), ffi.NULL, ffi.NULL, ffi.cast('double *', coords.ctypes.data), ffi.cast('int *', facets.ctypes.data), ffi.NULL) solutions.append(J[0]) # Test against exact result assert np.isclose(J[0], (0.5 * (a + np.cos(0.1)) + 0.5 * (a + b + np.cos(0))) * np.sqrt(b**2 + 0.1**2)) # Compare custom quadrature with vertex quadrature assert np.isclose(solutions[0], solutions[1]) def test_manifold_derivatives(compile_args): """Test higher order derivatives on manifolds""" c_el = basix.ufl.element("Lagrange", "interval", 1, shape=(2,), gdim=2) mesh = ufl.Mesh(c_el) x = ufl.SpatialCoordinate(mesh) dx = ufl.Measure("dx", domain=mesh) order = 4 el = basix.ufl.element("Lagrange", "interval", order, gdim=2) V = ufl.FunctionSpace(mesh, el) u = ufl.Coefficient(V) d = 5.3 f_ex = d * order * (order - 1) * x[1]**(order - 2) expr = u.dx(1).dx(1) - f_ex J = expr * expr * dx compiled_forms, module, _ = ffcx.codegeneration.jit.compile_forms( [J], cffi_extra_compile_args=compile_args) default_integral = compiled_forms[0].form_integrals[0] scale = 2.5 coords = np.array([0.0, 0.0, 0.0, 0.0, scale, 0.0], dtype=np.float64) dof_coords = scale * el.element.points.reshape(-1) w = np.array([d * d_c**order for d_c in dof_coords], dtype=np.float64) c = np.array([], dtype=np.float64) perm = np.array([0], dtype=np.uint8) ffi = module.ffi J = np.zeros(1, dtype=np.float64) kernel = getattr(default_integral, "tabulate_tensor_float64") kernel(ffi.cast('double *', J.ctypes.data), ffi.cast('double *', w.ctypes.data), ffi.cast('double *', c.ctypes.data), ffi.cast('double *', coords.ctypes.data), ffi.NULL, ffi.cast('uint8_t *', perm.ctypes.data)) assert np.isclose(J[0], 0.0) def test_integral_grouping(compile_args): """We group integrals with common integrands to avoid duplicated integration kernels. This means that `inner(u, v)*dx((1,2,3)) + inner(grad(u), grad(v))*dx(2) + inner(u,v)*dx` is grouped as 1. `inner(u,v)*dx(("everywhere", 1, 3))` 2. 

def test_integral_grouping(compile_args):
    """We group integrals with common integrands to avoid duplicated integration kernels.

    This means that
    `inner(u, v)*dx((1,2,3)) + inner(grad(u), grad(v))*dx(2) + inner(u,v)*dx`
    is grouped as
    1. `inner(u,v)*dx(("everywhere", 1, 3))`
    2. `(inner(grad(u), grad(v)) + inner(u, v))*dx(2)`
    Each group has one generated `tabulate_tensor_*` function, which is
    referred to multiple times in `form_integrals` and `form_integral_ids`.
    """
    mesh = ufl.Mesh(ufl.VectorElement("Lagrange", ufl.triangle, 1))
    V = ufl.FunctionSpace(mesh, ufl.FiniteElement("Lagrange", ufl.triangle, 1))
    u = ufl.TrialFunction(V)
    v = ufl.TestFunction(V)
    a = ufl.inner(u, v) * ufl.dx((1, 2, 3)) \
        + ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx(2) \
        + ufl.inner(u, v) * ufl.dx
    compiled_forms, module, _ = ffcx.codegeneration.jit.compile_forms(
        [a], cffi_extra_compile_args=compile_args)
    # NOTE: This assumes that the first integral type is cell integrals, see UFCx.h
    cell = module.lib.cell
    num_integrals = (compiled_forms[0].form_integral_offsets[cell + 1]
                     - compiled_forms[0].form_integral_offsets[cell])
    assert num_integrals == 4
    unique_integrals = set(
        [compiled_forms[0].form_integrals[compiled_forms[0].form_integral_offsets[cell] + i]
         for i in range(num_integrals)])
    assert len(unique_integrals) == 2

ffcx-0.7.0/test/test_lnodes.py
from ffcx.codegeneration import lnodes as L
from ffcx.codegeneration.C.c_implementation import CFormatter
from cffi import FFI
import numpy as np
import pytest
import importlib


@pytest.mark.parametrize("scalar", ("float", "double", "int"))
def test_gemm(scalar):
    # Test LNodes with a simple matrix-matrix multiply in C
    p, q, r = 5, 16, 12
    A = L.Symbol("A", dtype=L.DataType.SCALAR)
    B = L.Symbol("B", dtype=L.DataType.SCALAR)
    C = L.Symbol("C", dtype=L.DataType.SCALAR)
    code = [L.Comment(f"Matrix multiply A{p,r} = B{p,q} * C{q,r}")]

    i = L.Symbol("i", dtype=L.DataType.INT)
    j = L.Symbol("j", dtype=L.DataType.INT)
    k = L.Symbol("k", dtype=L.DataType.INT)

    m_ij = L.MultiIndex([i, j], [p, q])
    m_ik = L.MultiIndex([i, k], [p, r])
    m_jk = L.MultiIndex([j, k], [q, r])

    body = [L.AssignAdd(A[m_ik], B[m_ij] * C[m_jk])]
    body = [L.ForRange(i, 0, p, body=body)]
    body = [L.ForRange(j, 0, q, body=body)]
    code += [L.ForRange(k, 0, r, body=body)]

    # Format into C and compile with CFFI
    Q = CFormatter(scalar=scalar)
    decl = f"void gemm({scalar} *A, {scalar} *B, {scalar} *C)"
    c_code = decl + "{\n" + Q.c_format(L.StatementList(code)) + "\n}\n"

    ffibuilder = FFI()
    ffibuilder.cdef(decl + ";")
    ffibuilder.set_source(f"_gemm_{scalar}", c_code)
    ffibuilder.compile(verbose=True)

    _gemm = importlib.import_module(f"_gemm_{scalar}")
    gemm = _gemm.lib.gemm
    ffi = _gemm.ffi

    c_to_np = {"double": np.float64, "float": np.float32, "int": np.int32}
    np_scalar = c_to_np.get(scalar)
    A = np.zeros((p, r), dtype=np_scalar)
    B = np.ones((p, q), dtype=np_scalar)
    C = np.ones((q, r), dtype=np_scalar)
    pA = ffi.cast(f"{scalar} *", A.ctypes.data)
    pB = ffi.cast(f"{scalar} *", B.ctypes.data)
    pC = ffi.cast(f"{scalar} *", C.ctypes.data)
    gemm(pA, pB, pC)
    assert np.all(A == q)
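
# The generated C kernel above is (schematically) the triple loop
#
#     for (int k = 0; k < r; ++k)
#       for (int j = 0; j < q; ++j)
#         for (int i = 0; i < p; ++i)
#           A[i * r + k] += B[i * q + j] * C[j * r + k];
#
# assuming MultiIndex([i, j], [p, q]) flattens row-major to i * q + j.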
{scalar} *x)" c_code = decl + "{\n" + \ Q.c_format(L.StatementList(code)) + "\n}\n" ffibuilder = FFI() ffibuilder.cdef(decl + ";") ffibuilder.set_source(f"_gemv_{scalar}", c_code) ffibuilder.compile(verbose=True) _gemv = importlib.import_module(f"_gemv_{scalar}") gemv = _gemv.lib.gemm ffi = _gemv.ffi c_to_np = {"double": np.float64, "float": np.float32, "int": np.int32} np_scalar = c_to_np.get(scalar) y = np.arange(p, dtype=np_scalar) x = np.arange(q, dtype=np_scalar) A = np.outer(y, x) py = ffi.cast(f"{scalar} *", y.ctypes.data) pA = ffi.cast(f"{scalar} *", A.ctypes.data) px = ffi.cast(f"{scalar} *", x.ctypes.data) # Compute expected result s2 = q * (q - 1) * (2 * q - 1) // 6 + 1 result = np.arange(p, dtype=np_scalar) * s2 gemv(py, pA, px) assert np.all(y == result)