==== ffcx-0.9.0/.github/FUNDING.yml ====

# These are supported funding model platforms

github: FEniCS # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]

==== ffcx-0.9.0/.github/workflows/build-wheels.yml ====

name: Build wheels

# By default this action does not push to test or production PyPI. The wheels
# are available as an artifact that can be downloaded and tested locally.

on:
  workflow_dispatch:
    inputs:
      ffcx_ref:
        description: "FFCx git ref to checkout"
        default: "main"
        type: string
      test_pypi_publish:
        description: "Publish to Test PyPi (true | false)"
        default: false
        type: boolean
      pypi_publish:
        description: "Publish to PyPi (true | false)"
        default: false
        type: boolean

  workflow_call:
    inputs:
      ffcx_ref:
        description: "FFCx git ref to checkout"
        default: "main"
        type: string
      test_pypi_publish:
        description: "Publish to Test PyPi (true | false)"
        default: false
        type: boolean
      pypi_publish:
        description: "Publish to PyPi (true | false)"
        default: false
        type: boolean

jobs:
  build:
    name: Build wheels and source distributions
    runs-on: ubuntu-latest
    steps:
      - name: Checkout FFCx
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event.inputs.ffcx_ref }}

      - name: Upgrade pip and setuptools
        run: python -m pip install setuptools pip build --upgrade

      - name: Build sdist and wheel
        run: python -m build .

      - uses: actions/upload-artifact@v4
        with:
          path: dist/*

  upload_pypi:
    name: Upload to PyPI (optional)
    needs: [build]
    runs-on: ubuntu-latest
    steps:
      - uses: actions/download-artifact@v4
        with:
          name: artifact
          path: dist

      - name: Publish to PyPI
        uses: pypa/gh-action-pypi-publish@release/v1
        if: ${{ github.event.inputs.pypi_publish == 'true' }}
        with:
          user: __token__
          password: ${{ secrets.PYPI_TOKEN }}
          repository-url: https://upload.pypi.org/legacy/

      - name: Publish to Test PyPI
        uses: pypa/gh-action-pypi-publish@release/v1
        if: ${{ github.event.inputs.test_pypi_publish == 'true' }}
        with:
          user: __token__
          password: ${{ secrets.PYPI_TEST_TOKEN }}
          repository-url: https://test.pypi.org/legacy/
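This workflow is normally started from the repository's Actions tab, but it can also be dispatched from a terminal. A minimal sketch using the GitHub CLI, assuming `gh` is installed and authenticated; the `-f` flags mirror the `workflow_dispatch` inputs defined above, and the ref shown is illustrative:

    # Build wheels from the v0.9.0 tag and push them to Test PyPI only
    gh workflow run "Build wheels" --repo FEniCS/ffcx \
        -f ffcx_ref=v0.9.0 \
        -f test_pypi_publish=true \
        -f pypi_publish=false

The resulting sdist and wheel are uploaded as a workflow artifact either way, so a dry run with both publish flags false is enough to inspect the distributions locally.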
==== ffcx-0.9.0/.github/workflows/dolfinx-tests.yml ====

# This workflow will install Basix, FFCx, DOLFINx and run the DOLFINx unit tests.

name: DOLFINx integration

on:
  pull_request:
    branches:
      - main
  workflow_dispatch:
    inputs:
      dolfinx_ref:
        description: "DOLFINx branch or tag"
        default: "main"
        type: string
      basix_ref:
        description: "Basix branch or tag"
        default: "main"
        type: string
      ufl_ref:
        description: "UFL branch or tag"
        default: "main"
        type: string

jobs:
  build:
    name: Run DOLFINx tests
    runs-on: ubuntu-latest
    container: ghcr.io/fenics/test-env:current-openmpi
    env:
      PETSC_ARCH: linux-gnu-complex64-32
      OMPI_ALLOW_RUN_AS_ROOT: 1
      OMPI_ALLOW_RUN_AS_ROOT_CONFIRM: 1

    steps:
      - uses: actions/checkout@v4

      - name: Install UFL and Basix (default branches/tags)
        if: github.event_name != 'workflow_dispatch'
        run: |
          python3 -m pip install --break-system-packages git+https://github.com/FEniCS/ufl.git
          python3 -m pip install --break-system-packages git+https://github.com/FEniCS/basix.git

      - name: Install UFL and Basix (specified branches/tags)
        if: github.event_name == 'workflow_dispatch'
        run: |
          python3 -m pip install --break-system-packages git+https://github.com/FEniCS/ufl.git@${{ github.event.inputs.ufl_ref }}
          python3 -m pip install --break-system-packages git+https://github.com/FEniCS/basix.git@${{ github.event.inputs.basix_ref }}

      - name: Install FFCx
        run: |
          pip3 install --break-system-packages .

      - name: Get DOLFINx source (default branch/tag)
        if: github.event_name != 'workflow_dispatch'
        uses: actions/checkout@v4
        with:
          path: ./dolfinx
          repository: FEniCS/dolfinx
          ref: main

      - name: Get DOLFINx source (specified branch/tag)
        if: github.event_name == 'workflow_dispatch'
        uses: actions/checkout@v4
        with:
          path: ./dolfinx
          repository: FEniCS/dolfinx
          ref: ${{ github.event.inputs.dolfinx_ref }}

      - name: Install DOLFINx (C++)
        run: |
          cmake -G Ninja -DCMAKE_BUILD_TYPE=Developer -B build -S dolfinx/cpp/
          cmake --build build
          cmake --install build

      - name: Install DOLFINx (Python)
        run: |
          python3 -m pip -v install --break-system-packages nanobind scikit-build-core[pyproject]
          python3 -m pip -v install --break-system-packages --check-build-dependencies --no-build-isolation dolfinx/python/

      - name: Build DOLFINx C++ unit tests
        run: |
          cmake -G Ninja -DCMAKE_BUILD_TYPE=Developer -B build/test/ -S dolfinx/cpp/test/
          cmake --build build/test

      - name: Run DOLFINx C++ unit tests
        run: |
          cd build/test
          ctest -V --output-on-failure -R unittests

      - name: Install Python demo/test dependencies
        run: python3 -m pip install --break-system-packages matplotlib numba pyamg pytest pytest-xdist scipy

      - name: Run DOLFINx Python unit tests
        run: python3 -m pytest -n auto dolfinx/python/test/unit

      - name: Run DOLFINx Python demos
        run: python3 -m pytest -n=2 -m serial dolfinx/python/demo/test.py
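The install order in this job matters: UFL and Basix must be in place before FFCx is built, and DOLFINx is compiled last against all three. A rough local equivalent of the steps above, assuming an FFCx checkout in the current directory and a C++ toolchain with CMake and Ninja available:

    # FEniCSx Python stack, in dependency order
    python3 -m pip install git+https://github.com/FEniCS/ufl.git
    python3 -m pip install git+https://github.com/FEniCS/basix.git
    python3 -m pip install .   # FFCx, from this source tree

    # Build and install DOLFINx (C++ core, then Python bindings)
    git clone https://github.com/FEniCS/dolfinx.git
    cmake -G Ninja -DCMAKE_BUILD_TYPE=Developer -B build -S dolfinx/cpp/
    cmake --build build
    cmake --install build
    python3 -m pip install dolfinx/python/

    # Run the DOLFINx Python unit tests, as in the final steps above
    python3 -m pip install pytest pytest-xdist
    python3 -m pytest -n auto dolfinx/python/test/unit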
==== ffcx-0.9.0/.github/workflows/pythonapp.yml ====

# This workflow will install Python dependencies, run tests and lint
# with a single version of Python. For more information see:
# https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions

name: FFCx CI

on:
  push:
    branches:
      - "**"
    tags:
      - "v*"
  pull_request:
    branches:
      - main
  workflow_dispatch:

jobs:
  build:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest]
        python-version: ['3.9', '3.10', '3.11', '3.12']
        include:
          - os: macos-latest
            python-version: '3.12'

    steps:
      - name: Checkout FFCx
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - name: Export GitHub Actions cache environment variables (Windows)
        if: runner.os == 'Windows'
        uses: actions/github-script@v6
        with:
          script: |
            core.exportVariable('ACTIONS_CACHE_URL', process.env.ACTIONS_CACHE_URL || '');
            core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '');

      - name: Install dependencies (non-Python, Linux)
        if: runner.os == 'Linux'
        run: |
          sudo apt-get install -y graphviz libgraphviz-dev ninja-build pkg-config

      - name: Install dependencies (non-Python, macOS)
        if: runner.os == 'macOS'
        run: brew install ninja pkg-config

      - name: Install FEniCS dependencies (Python, Unix)
        if: runner.os == 'Linux' || runner.os == 'macOS'
        run: |
          pip install git+https://github.com/FEniCS/ufl.git
          pip install git+https://github.com/FEniCS/basix.git

      - name: Install FEniCS dependencies (Python, Windows)
        if: runner.os == 'Windows'
        env:
          VCPKG_BINARY_SOURCES: "clear;x-gha,readwrite"
        run: |
          pip install git+https://github.com/FEniCS/ufl.git
          pip install -v git+https://github.com/FEniCS/basix.git --config-settings=cmake.args=-DINSTALL_RUNTIME_DEPENDENCIES=ON --config-settings=cmake.args=-DCMAKE_TOOLCHAIN_FILE=C:/vcpkg/scripts/buildsystems/vcpkg.cmake

      - name: Install FFCx (Linux, with optional dependencies)
        if: runner.os == 'Linux'
        run: pip install .[ci,optional]

      - name: Install FFCx (macOS, Windows)
        if: runner.os != 'Linux'
        run: pip install .[ci]

      - name: Static check with mypy
        run: mypy ffcx/

      - name: ruff checks
        run: |
          ruff check .
          ruff format --check .

      - name: Run unit tests
        run: python -m pytest -n auto --cov=ffcx/ --junitxml=junit/test-results-${{ matrix.os }}-${{ matrix.python-version }}.xml test/

      - name: Upload to Coveralls
        if: ${{ github.repository == 'FEniCS/ffcx' && github.head_ref == '' && matrix.os == 'ubuntu-latest' && matrix.python-version == '3.9' }}
        env:
          COVERALLS_REPO_TOKEN: ${{ secrets.COVERALLS_REPO_TOKEN }}
        run: coveralls
        continue-on-error: true

      - name: Upload pytest results
        uses: actions/upload-artifact@v4
        with:
          name: pytest-results-${{ matrix.os }}-${{ matrix.python-version }}
          path: junit/test-results-${{ matrix.os }}-${{ matrix.python-version }}.xml
        # Use always() to always run this step to publish test results
        # when there are test failures
        if: always()

      - name: Setup cl.exe (Windows)
        if: runner.os == 'Windows'
        uses: ilammy/msvc-dev-cmd@v1

      - name: Run FFCx demos
        run: |
          pytest demo/test_demos.py

      - name: Build documentation
        run: |
          cd doc
          make html

      - name: Upload documentation artifact
        uses: actions/upload-artifact@v4
        with:
          name: doc-${{ matrix.os }}-${{ matrix.python-version }}
          path: doc/build/html/
          retention-days: 2
          if-no-files-found: error

      - name: Checkout FEniCS/docs
        if: ${{ github.repository == 'FEniCS/ffcx' && ( github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/') ) && runner.os == 'Linux' && matrix.python-version == '3.9' }}
        uses: actions/checkout@v4
        with:
          repository: "FEniCS/docs"
          path: "docs"
          ssh-key: "${{ secrets.SSH_GITHUB_DOCS_PRIVATE_KEY }}"

      - name: Set version name
        if: ${{ github.repository == 'FEniCS/ffcx' && ( github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/') ) && runner.os == 'Linux' && matrix.python-version == '3.9' }}
        run: |
          echo "VERSION_NAME=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV

      - name: Copy documentation into repository
        if: ${{ github.repository == 'FEniCS/ffcx' && ( github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/') ) && runner.os == 'Linux' && matrix.python-version == '3.9' }}
        run: |
          cd docs
          git rm -r --ignore-unmatch ffcx/${{ env.VERSION_NAME }}
          mkdir -p ffcx/${{ env.VERSION_NAME }}
          cp -r ../doc/build/html/* ffcx/${{ env.VERSION_NAME }}
      - name: Commit and push documentation to FEniCS/docs
        if: ${{ github.repository == 'FEniCS/ffcx' && ( github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/') ) && runner.os == 'Linux' && matrix.python-version == '3.9' }}
        run: |
          cd docs
          git config --global user.email "fenics@github.com"
          git config --global user.name "FEniCS GitHub Actions"
          git add --all
          git commit --allow-empty -m "Python FEniCS/ffcx@${{ github.sha }}"
          git push

==== ffcx-0.9.0/.github/workflows/spack.yml ====

name: Spack install

on:
  # Uncomment the below 'push' to trigger on push
  # push:
  #   branches:
  #     - "**"
  schedule:
    # '*' is a special character in YAML, so string must be quoted
    - cron: "0 2 * * SUN"
  workflow_dispatch:
    inputs:
      spack_repo:
        description: "Spack repository to test"
        default: "spack/spack"
        type: string
      spack_ref:
        description: "Spack repository branch/tag to test"
        default: "develop"
        type: string
      ffcx_version:
        description: "Spack build tag"
        default: "main"
        type: string

jobs:
  build:
    runs-on: ubuntu-latest
    container: ubuntu:latest
    steps:
      - name: Install Spack requirements
        run: |
          apt-get -y update
          apt-get install -y bzip2 curl file git gzip make patch python3-minimal tar xz-utils
          apt-get install -y g++ gfortran  # compilers

      - name: Get Spack (default repository)
        if: github.event_name != 'workflow_dispatch'
        uses: actions/checkout@v4
        with:
          path: ./spack
          repository: spack/spack

      - name: Get Spack (specified repository)
        if: github.event_name == 'workflow_dispatch'
        uses: actions/checkout@v4
        with:
          path: ./spack
          repository: ${{ github.event.inputs.spack_repo }}
          ref: ${{ github.event.inputs.spack_ref }}

      - name: Install FFCx and run tests
        if: github.event_name != 'workflow_dispatch'
        run: |
          . ./spack/share/spack/setup-env.sh
          spack env create ffcx-main
          spack env activate ffcx-main
          spack add py-fenics-ffcx@main
          spack install
          # spack install --test=root

      - name: Install FFCx and run tests (specified version)
        if: github.event_name == 'workflow_dispatch'
        run: |
          . ./spack/share/spack/setup-env.sh
          spack env create ffcx-testing
          spack env activate ffcx-testing
          spack add py-fenics-ffcx@${{ github.event.inputs.ffcx_version }}
          spack install
          # spack install --test=root
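Once the Spack environment above has been installed, the result can be checked from the same shell. A short sanity-check sketch; the import probe and `--help` call are illustrative additions rather than part of the workflow:

    # Make spack available and enter the environment built above
    . ./spack/share/spack/setup-env.sh
    spack env activate ffcx-main

    # Confirm that the Spack-built FFCx is the one on the Python path
    python3 -c "import ffcx; print(ffcx.__version__)"
    ffcx --help   # command-line compiler entry point installed with FFCx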
==== ffcx-0.9.0/AUTHORS ====

Credits for FFC
===============

Main authors:

  Anders Logg
  email: logg@simula.no
  www: http://home.simula.no/~logg/

  Kristian B. Ølgaard
  email: k.b.oelgaard@gmail.com

  Marie Rognes
  email: meg@simula.no

Main contributors:

  Garth N. Wells
  email: gnw20@cam.ac.uk
  www: http://www.eng.cam.ac.uk/~gnw20/

  Martin Sandve Alnæs
  email: martinal@simula.no

Contributors:

  Jan Blechta
  email: blechta@karlin.mff.cuni.cz

  Peter Brune
  email: brune@uchicago.edu

  Joachim B Haga
  email: jobh@broadpark.no

  Johan Jansson
  email: johanjan@math.chalmers.se
  www: http://www.math.chalmers.se/~johanjan/

  Robert C. Kirby
  email: kirby@cs.uchicago.edu
  www: http://people.cs.uchicago.edu/~kirby/

  Matthew G. Knepley
  email: knepley@mcs.anl.gov
  www: http://www-unix.mcs.anl.gov/~knepley/

  Dag Lindbo
  email: dag@f.kth.se
  www: http://www.f.kth.se/~dag/

  Ola Skavhaug
  email: skavhaug@simula.no
  www: http://home.simula.no/~skavhaug/

  Andy R. Terrel
  email: aterrel@uchicago.edu
  www: http://people.cs.uchicago.edu/~aterrel/

  Ivan Yashchuk
  email: ivan.yashchuk@aalto.fi

  Matthew Scroggs
  email: matthew.scroggs.14@ucl.ac.uk
  www: https://mscroggs.co.uk

Credits for UFC
===============

UFC was merged into FFC 2014-02-18. Below is the list of credits for UFC
at the time of the merge.

Main authors:

  Martin Sandve Alnaes
  Anders Logg
  Kent-Andre Mardal
  Hans Petter Langtangen

Main contributors:

  Asmund Odegard
  Kristian Oelgaard
  Johan Hake
  Garth N. Wells
  Marie E. Rognes
  Johannes Ring

Credits for UFLACS
==================

UFLACS was merged into FFC 2016-02-16.

Author:

  Martin Sandve Alnæs

Contributors:

  Anders Logg
  Garth N. Wells
  Johannes Ring
  Matthias Liertzer
  Steffen Müthing
==== ffcx-0.9.0/CODE_OF_CONDUCT.md ====

Code of Conduct
===============

Our Pledge
----------

In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation.

Our Standards
-------------

Examples of behavior that contributes to creating a positive environment include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting

Our Responsibilities
--------------------

Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.

Scope
-----

This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.

Enforcement
-----------

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at fenics-steering-council@googlegroups.com. Alternatively, you may report individually to one of the members of the Steering Council. Complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.

If you feel that your report has not been followed up satisfactorily, then you may contact our parent organisation NumFOCUS at info@numfocus.org for further redress.

Attribution
-----------

This Code of Conduct is adapted from the Contributor Covenant, version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html.

Adaptations
-----------

* Allow reporting to individual Steering Council members
* Added the option to contact NumFOCUS for further redress.

For answers to common questions about this code of conduct, see https://www.contributor-covenant.org/faq

==== ffcx-0.9.0/COPYING ====

                    GNU GENERAL PUBLIC LICENSE
                       Version 3, 29 June 2007

Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.

                            Preamble

The GNU General Public License is a free, copyleft license for software and other kinds of works.

The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too.

When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things.

To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others.

For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights.

Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it.

For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions.

Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software.
The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. 
The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. 
When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. 
b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. 
If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. 
Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. 
The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. 
It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found.

    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) <year> <name of author>

    This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.

    This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode:

    <program> Copyright (C) <year> <name of author>
    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
    This is free software, and you are welcome to redistribute it
    under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box".

You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see <https://www.gnu.org/licenses/>.

The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read <https://www.gnu.org/philosophy/why-not-lgpl.html>.

==== ffcx-0.9.0/COPYING.LESSER ====

                   GNU LESSER GENERAL PUBLIC LICENSE
                       Version 3, 29 June 2007

Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.

This version of the GNU Lesser General Public License incorporates the terms and conditions of version 3 of the GNU General Public License, supplemented by the additional permissions listed below.

0. Additional Definitions.

As used herein, "this License" refers to version 3 of the GNU Lesser General Public License, and the "GNU GPL" refers to version 3 of the GNU General Public License.

"The Library" refers to a covered work governed by this License, other than an Application or a Combined Work as defined below.

An "Application" is any work that makes use of an interface provided by the Library, but which is not otherwise based on the Library. Defining a subclass of a class defined by the Library is deemed a mode of using an interface provided by the Library.

A "Combined Work" is a work produced by combining or linking an Application with the Library. The particular version of the Library with which the Combined Work was made is also called the "Linked Version".
The "Minimal Corresponding Source" for a Combined Work means the Corresponding Source for the Combined Work, excluding any source code for portions of the Combined Work that, considered in isolation, are based on the Application, and not on the Linked Version. The "Corresponding Application Code" for a Combined Work means the object code and/or source code for the Application, including any data and utility programs needed for reproducing the Combined Work from the Application, but excluding the System Libraries of the Combined Work. 1. Exception to Section 3 of the GNU GPL. You may convey a covered work under sections 3 and 4 of this License without being bound by section 3 of the GNU GPL. 2. Conveying Modified Versions. If you modify a copy of the Library, and, in your modifications, a facility refers to a function or data to be supplied by an Application that uses the facility (other than as an argument passed when the facility is invoked), then you may convey a copy of the modified version: a) under this License, provided that you make a good faith effort to ensure that, in the event an Application does not supply the function or data, the facility still operates, and performs whatever part of its purpose remains meaningful, or b) under the GNU GPL, with none of the additional permissions of this License applicable to that copy. 3. Object Code Incorporating Material from Library Header Files. The object code form of an Application may incorporate material from a header file that is part of the Library. You may convey such object code under terms of your choice, provided that, if the incorporated material is not limited to numerical parameters, data structure layouts and accessors, or small macros, inline functions and templates (ten or fewer lines in length), you do both of the following: a) Give prominent notice with each copy of the object code that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the object code with a copy of the GNU GPL and this license document. 4. Combined Works. You may convey a Combined Work under terms of your choice that, taken together, effectively do not restrict modification of the portions of the Library contained in the Combined Work and reverse engineering for debugging such modifications, if you also do each of the following: a) Give prominent notice with each copy of the Combined Work that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the Combined Work with a copy of the GNU GPL and this license document. c) For a Combined Work that displays copyright notices during execution, include the copyright notice for the Library among these notices, as well as a reference directing the user to the copies of the GNU GPL and this license document. d) Do one of the following: 0) Convey the Minimal Corresponding Source under the terms of this License, and the Corresponding Application Code in a form suitable for, and under terms that permit, the user to recombine or relink the Application with a modified version of the Linked Version to produce a modified Combined Work, in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source. 1) Use a suitable shared library mechanism for linking with the Library. 
A suitable mechanism is one that (a) uses at run time a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked Version. e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the GNU GPL, and only to the extent that such information is necessary to install and execute a modified version of the Combined Work produced by recombining or relinking the Application with a modified version of the Linked Version. (If you use option 4d0, the Installation Information must accompany the Minimal Corresponding Source and Corresponding Application Code. If you use option 4d1, you must provide the Installation Information in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.) 5. Combined Libraries. You may place library facilities that are a work based on the Library side by side in a single library together with other library facilities that are not Applications and are not covered by this License, and convey such a combined library under terms of your choice, if you do both of the following: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities, conveyed under the terms of this License. b) Give prominent notice with the combined library that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 6. Revised Versions of the GNU Lesser General Public License. The Free Software Foundation may publish revised and/or new versions of the GNU Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library as you received it specifies that a certain numbered version of the GNU Lesser General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that published version or of any later version published by the Free Software Foundation. If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation. If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library. 
ffcx-0.9.0/ChangeLog.rst000066400000000000000000000611741470142666300150510ustar00rootroot00000000000000Changelog ========= 0.6.0 ----- See https://github.com/FEniCS/ffcx/compare/v0.5.0...v0.6.0 0.5.0 ----- See: https://github.com/FEniCS/ffcx/compare/v0.5.0...v0.4.0 0.4.0 ----- See: https://github.com/FEniCS/ffcx/compare/v0.4.0...v0.3.0 0.3.0 ----- See: https://github.com/FEniCS/ffcx/compare/v0.3.0...v0.2.0 0.2.0 ----- - No changes 0.1.0 ----- Alpha release of ffcx 2018.2.0.dev0 ------------- - No changes 2018.1.0.dev0 (no release) -------------------------- - Forked FFCx 2017.2.0 (2017-12-05) --------------------- - Some fixes for ufc::eval for esoteric element combinations - Reimplement code generation for all ufc classes with new class ufc::coordinate_mapping which can map between coordinates, compute jacobians, etc. for a coordinate mapping parameterized by a specific finite element. - New functions in ufc::finite_element: - evaluate_reference_basis - evaluate_reference_basis_derivatives - transform_reference_basis_derivatives - tabulate_reference_dof_coordinates - New functions in ufc::dofmap: - num_global_support_dofs - num_element_support_dofs - Improved docstrings for parts of ufc.h - FFC now accepts Q and DQ finite element families defined on quadrilaterals and hexahedrons - Some fixes for ufc_geometry.h for quadrilateral and hexahedron cells 2017.1.0.post2 (2017-09-12) --------------------------- - Change PyPI package name to fenics-ffc. 2017.1.0 (2017-05-09) --------------------- - Let ffc -O parameter take an optional integer level like -O2, -O0 - Implement blockwise optimizations in uflacs code generation - Expose uflacs optimization parameters through parameter system 2016.2.0 (2016-11-30) --------------------- - Jit compiler now compiles elements separately from forms to avoid duplicate work - Add parameter max_signature_length to optionally shorten signatures in the jit cache - Move uflacs module into ffc.uflacs - Remove installation of pkg-config and CMake files (UFC path and compiler flags are available from ffc module) - Add dependency on dijitso and remove dependency on instant - Add experimental Bitbucket pipelines - Tidy the repo after UFC and UFLACS merge, and general spring cleanup. This includes removal of instructions how to merge two repos, commit hash c8389032268041fe94682790cb773663bdf27286. 2016.1.0 (2016-06-23) --------------------- - Add function get_ufc_include to get path to ufc.h - Merge UFLACS into FFC - Generalize ufc interface to non-affine parameterized coordinates - Add ufc::coordinate_mapping class - Make ufc interface depend on C++11 features requiring gcc version >= 4.8 - Add function ufc_signature() to the form compiler interface - Add function git_commit_hash() 1.6.0 (2015-07-28) ------------------ - Rename and modify a number of UFC interface functions. See docstrings in ufc.h for details. 
- Bump required SWIG version to 3.0.3 - Disable dual basis (tabulate_coordinates and evaluate_dofs) for enriched elements until correct implementation is brought up 1.5.0 (2015-01-12) ------------------ - Remove FErari support - Add support for new integral type custom_integral - Support for new form compiler backend "uflacs", downloaded separately 1.4.0 (2014-06-02) ------------------ - Add support for integrals that know which coefficients they use - Many bug fixes for facet integrals over manifolds - Merge UFC into FFC; ChangeLog for UFC appended below - Various updates mirroring UFL changes - Experimental: New custom integral with user defined quadrature points 1.3.0 (2014-01-07) ------------------ - Fix bug with runtime check of SWIG version - Move DOLFIN wrappers here from DOLFIN - Add support for new UFL operators cell_avg and facet_avg - Add new reference data handling system, now data is kept in an external repository - Fix bugs with ignoring quadrature rule arguments - Use cpp optimization by default in jit compiler 1.2.0 (2013-03-24) ------------------ - New feature: Add basic support for point integrals on vertices - New feature: Add general support for m-dimensional cells in n-dimensional space (n >= m, n, m = 1, 2, 3) 1.1.0 (2013-01-07) ------------------ - Fix bug for Conditionals related to DG constant Coefficients. Bug #1082048. - Fix bug for Conditionals, precedence rules for And and Or. Bug #1075149. - Changed data structure from list to deque when pop(0) operation is needed, speeding up split_expression operation considerably - Other minor fixes 1.0.0 (2011-12-07) ------------------ - Issue warning when form integration requires more than 100 points 1.0-rc1 (2011-11-28) -------------------- - Fix bug with coordinates on facet integrals (intervals). Bug #888682. - Add support for FacetArea, new geometric quantity in UFL. - Fix bug in optimised quadrature code, AlgebraOperators demo. Bug #890859. - Fix bug with undeclared variables in optimised quadrature code. Bug #883202. 1.0-beta2 (2011-10-11) ---------------------- - Added support for bessel functions, bessel_* (I,J,K,Y), in UFL. - Added support for error function, erf(), new math function in UFL. - Fix dof map 'need_entities' for Real spaces - Improve performance for basis function computation 1.0-beta (2011-08-11) --------------------- - Improve formatting of floats with up to one non-zero decimal place. - Fix bug involving zeros in products and sums. Bug #804160. - Fix bug for new conditions '&&', '||' and '!' in UFL. Bug #802560. - Fix bug involving VectorElement with dim=1. Bug #798578. - Fix bug with mixed element of symmetric tensor elements. Bug #745646.
- Fix bug when using geometric coordinates with one quadrature point 0.9.10 (2011-05-16) ------------------- - Change license from GPL v3 or later to LGPL v3 or later - Add some schemes for low-order simplices - Request quadrature schemes by polynomial degree (no longer by number of points in each direction) - Get quadrature schemes via ffc.quadrature_schemes - Improved lock handling in JIT compiler - Include common_cell in form signature - Add possibility to set swig binary and swig path 0.9.9 (2011-02-23) ------------------ - Add support for generating error control forms with option -e - Updates for UFC 2.0 - Set minimal degree to 1 in automatic degree selection for expressions - Add command-line option -f no_ferari - Add support for plotting of elements - Add utility function compute_tensor_representation 0.9.4 (2010-09-01) ------------------ - Added memory cache in jit(), for preprocessed forms - Added support for Conditional and added demo/Conditional.ufl. - Added support for new geometric quantity Circumradius in UFL. - Added support for new geometric quantity CellVolume in UFL. 0.9.3 (2010-07-01) ------------------ - Make global_dimension for Real return an int instead of double, bug # 592088 - Add support for facet normal in 1D. - Expose -feliminate_zeros for quadrature optimisations to give user more control - Remove return of form in compile_form - Remove object_names argument to compile_element - Rename ElementUnion -> EnrichedElement - Add support for tan() and inverse trigonometric functions - Added support for ElementUnion (i.e. span of combinations of elements) - Added support for Bubble elements - Added support for UFL.SpatialCoordinate. 0.9.2 (2010-02-17) ------------------ - Bug fix in removal of unused variables in Piola-mapped terms for tensor representation 0.9.1 (2010-02-15) ------------------ - Add back support for FErari optimizations - Bug fixes in JIT compiler 0.9.0 (2010-02-02) ------------------ - Updates for FIAT 0.9.0 - Updates for UFC 1.4.0 (now supporting the full interface) - Automatic selection of representation - Change quadrature_order --> quadrature_degree - Split compile() --> compile_form(), compile_element() - Major cleanup and reorganization of code (flatter directories) - Updates for changes in UFL: Argument, Coefficient, FormData 0.7.1 ----- - Handle setting quadrature degree when it is set to None in UFL form - Added demo: HyperElasticity.ufl 0.7.0 ----- - Move contents of TODO to: https://blueprints.launchpad.net/ffc - Support for restriction of finite elements to only consider facet dofs - Use quadrature_order from metadata when integrating terms using tensor representation - Use loop to reset the entries of the local element tensor - Added new symbolic classes for quadrature optimisation (speed up compilation) - Added demos: Biharmonic.ufl, div(grad(v)) term; ReactionDiffusion.ufl, tuple notation; MetaData.ufl, how to attach metadata to the measure; ElementRestriction.ufl, restriction of elements to facets - Tabulate the coordinates of the integration points in the tabulate_tensor() function - Change command line option '-f split_implementation' -> '-f split' - Renaming of files and restructuring of the compiler directory - Added option -q rule (--quadrature-rule rule) to specify which rule to use for integration of a given integral. (Can also be set through the metadata through "quadrature_rule"). No rules have yet been implemented, so default is the FIAT rule.
- Remove support for old style .form files/format 0.6.2 (2009-04-07) ------------------ - Experimental support for UFL, supporting both .form and .ufl - Moved configuration and construction of python extension module to ufc_module 0.6.1 (2009-02-18) ------------------ - Initial work on UFL transition - Minor bug fixes - The version of ufc and swig is included in the form signature - Better system configuration for JIT compiled forms - The JIT compiled python extension module uses shared_ptr for all classes 0.6.0 (2009-01-05) ------------------ - Update DOLFIN output format (-l dolfin) for DOLFIN 0.9.0 - Cross-platform fixes for test scripts - Minor bug fix for quadrature code generation (forms affected by this bug would not be able to compile) - Fix bug with output of ``*.py``. - Permit dot product between rectangular matrices (Frobenius norm) 0.5.1 (2008-10-20) ------------------ - New operator skew() - Allow JIT compilation of elements and dof maps - Rewrite JIT compiler to rely on Instant for caching - Display flop count for evaluating the element tensor during compilation - Add arguments language and representation to options dictionary - Fix installation on Windows - Add option -f split_implementation for separate .h and .cpp files 0.5.0 (2008-06-23) ------------------ - Remove default restriction +/- for Constant - Make JIT optimization (-O0 / -O2) optional - Add in-memory cache to speed up JIT compiler for repeated assembly - Allow subdomain integrals without needing full range of integrals - Allow simple subdomain integral specification dx(0), dx(1), ds(0) etc 0.4.5 (2008-04-30) ------------------ - Optimizations in generated quadrature code - Change formatting of floats from %g to %e, fixes problem with too long integers - Bug fix for order of values in interpolate_vertex_values, now according to UFC - Speed up JIT compiler - Add index ranges to form printing - Throw runtime error in functions not generated - Update DOLFIN format for new location of include files 0.4.4 (2008-02-18) ------------------ - RT, BDM, BDFM and Nedelec now working in 2D and 3D - New element type QuadratureElement - Add support for 1D elements - Add experimental support for new Darcy-Stokes element - Use FIAT transformed spaces instead of mapping in FFC - Updates for UFC 1.1 - Implement caching of forms/modules in ~/.ffc/cache for JIT compiler - Add script ffc-clean - New operators lhs() and rhs() - Bug fixes in simplify - Bug fixes for Nedelec and BDFM - Fix bug in mult() - Fix bug with restrictions on exterior facet integrals - Fix bug in grad() for vectors - Add divergence operator for matrices 0.4.3 (2007-10-23) ------------------ - Require FIAT to use UFC reference cells - Fix bug in form simplification - Rename abs --> modulus to avoid conflict with builtin abs - Fix bug in operators invert, abs, sqrt - Fix bug in integral tabulation - Add BDFM and Nedelec elements (nonworking) - Fix bug in JIT compiler 0.4.2 (2007-08-31) ------------------ - Change license from GPL v2 to GPL v3 or later - Add JIT (just-in-time) compiler - Fix bug for constants on interior facets 0.4.1 (2007-06-22) ------------------ - Fix bug in simplification of forms - Optimize removal of unused terms in code formatting 0.4.0 (2007-06-20) ------------------ - Move to UFC interface for code generation - Major rewrite, restructure, cleanup - Add support for Brezzi-Douglas-Marini (BDM) elements - Add support for Raviart-Thomas (RT) elements - Add support for Discontinuous Galerkin (DG) methods - Operators jump() and avg() - Add
quadrature compilation mode (experimental) - Simplification of forms - Operators sqrt(), abs() and inverse - Improved Python interface - Add flag -f precision=n - Generate code for basis functions and derivatives - Use Set from set module for Python2.3 compatibility 0.3.5 (2006-12-01) ------------------ - Bug fixes - Move from Numeric to numpy 0.3.4 (2006-10-27) ------------------ - Updates for new DOLFIN mesh library - Add support for evaluation of functionals - Add operator outer() for outer product of vector-valued functions - Enable optimization of linear forms (in addition to bilinear forms) - Remove DOLFIN SWIG format - Fix bug in ffc -v/--version (thanks to Ola Skavhaug) - Consolidate DOLFIN and DOLFIN SWIG formats (patch from Johan Jansson) - Fix bug in optimized compilation (-O) for some forms ("too many values to unpack") 0.3.3 (2006-09-05) ------------------ - Fix bug in operator div() - Add operation count (number of multiplications) with -d0 - Add hint for printing more informative error messages (flag -d1) - Modify implementation of vertexeval() - Add support for boundary integrals (Garth N. Wells) 0.3.2 (2006-04-01) ------------------ - Add support for FErari optimizations, new flag -O 0.3.1 (2006-03-28) ------------------ - Remove verbose output: silence means success - Generate empty boundary integral eval() to please Intel C++ compiler - New classes TestFunction and TrialFunction 0.3.0 (2006-03-01) ------------------ - Work on manual, document command-line and user-interfaces - Name change: u --> U - Add compilation of elements without form - Add generation of FiniteElementSpec in DOLFIN formats - Fix bugs in raw and XML formats - Fix bug in LaTeX format - Fix path and predefine tokens to enable import in .form file - Report number of entries in reference tensor during compilation 0.2.5 (2005-12-28) ------------------ - Add demo Stabilization.form - Further speedup computation of reference tensor (use ufunc Numeric.add) 0.2.4 (2005-12-05) ------------------ - Report time taken to compute reference tensor - Restructure computation of reference tensor to use less memory. As a side effect, the speed has also been improved. 
- Update for DOLFIN name change node --> vertex - Update finite element interface for DOLFIN - Check for FIAT bug in discontinuous vector Lagrange elements - Fix signatures for vector-valued elements 0.2.3 (2005-11-28) ------------------ - New fast Numeric/BLAS based algorithm for computing reference tensor - Bug fix: reassign indices for complete subexpressions - Bug fix: operator Function * Integral - Check tensor notation for completeness - Bug fix: mixed elements with more than two function spaces - Don't declare unused coefficients (or gcc will complain) 0.2.2 (2005-11-14) ------------------ - Add command-line argument -v / --version - Add new operator mean() for projection onto piecewise constants - Add support for projections - Bug fix for higher order mixed elements: declaration of edge/face_ordering - Generate code for sub elements of mixed elements - Add new test form: TensorWeightedLaplacian - Add new test form: EnergyNorm - Fix bugs in mult() and vec() (skavhaug) - Reset correct entries of G for interior in BLAS mode - Only assign to entries of G that meet nonzero entries of A in BLAS mode 0.2.1 (2005-10-11) ------------------ - Only generate declarations that are needed according to format - Check for missing options and add missing default options - Simplify usage of FFC as Python module: from ffc import * - Fix bug in division with constants - Generate output for BLAS (with option -f blas) - Add new XML output format - Remove command-line option --license (collect in compiler options -f) - Modify demo Mass.form to use 3rd order Lagrange on tets - Fix bug in dofmap() for equal order mixed elements - Add compiler option -d debuglevel - Fix Python Numeric bug: vdot --> dot 0.2.0 (2005-09-23) ------------------ - Generate function vertexeval() for evaluation at vertices - Add support for arbitrary mixed elements - Add man page - Work on manual, chapters on form language, quickstart and installation - Handle exceptions gracefully in command-line interface - Use new template fenicsmanual.cls for manual - Add new operators grad, div, rot (curl), D, rank, trace, dot, cross - Factorize common reference tensors from terms with equal signatures - Collect small building blocks for form algebra in common module tokens.py 0.1.9 (2005-07-05) ------------------ - Complete support for general order Lagrange elements on triangles and tetrahedra - Compute reordering of dofs on tets correctly - Update manual with ordering of dofs - Break compilation into two phases: build() and write() - Add new output format ASE (Matt Knepley) - Improve python interface to FFC - Remove excessive logging at compilation - Fix bug in raw output format 0.1.8 (2005-05-17) ------------------ - Access data through map in DOLFIN format - Experimental support for computation of coordinate maps - Add first draft of manual - Experimental support for computation of dof maps - Allow specification of the number of components for vector Lagrange - Count the number of zeros dropped - Fix bug in handling command-line arguments - Use module sets instead of built-in set (fix for Python 2.3) - Handle constant indices correctly (bug reported by Garth N.
Wells) 0.1.7 (2005-05-02) ------------------ - Write version number to output - Add command-line option for choosing license - Display usage if no input is given - Bug fix for finding correct prefix of file name - Automatically choose name of output file (if not supplied) - Use FIAT tabulation mode for vector-valued elements (speedup a factor 5) - Use FIAT tabulation mode for scalar elements (speedup a factor 1000) - Fix bug in demo elasticity.form (change order of u and v) - Make references to constants const in DOLFIN format - Don't generate code for unused entries of geometry tensor - Update formats to write numeric constants with full precision 0.1.6 (2005-03-17) ------------------ - Add support for mixing multiple different finite elements - Add support for division with constants - Fix index bug (reverse order of multi-indices) 0.1.5 (2005-03-14) ------------------ - Automatically choose the correct quadrature rule for precomputation - Add test program for verification of FIAT quadrature rules - Fix bug for derivative of sum - Improve common interface for debugging: add indentation - Add support for constants - Fix bug for sums of more than one term (make copies of references in lists) - Add '_' in naming of geometry tensor (needed for large dimensions) - Add example elasticity.form - Cleanup build_indices() 0.1.4-1 (2005-02-07) -------------------- - Fix version number and remove build directory from tarball 0.1.4 (2005-02-04) ------------------ - Fix bug for systems, seems to work now - Add common interface for debugging - Modify DOLFIN output to initialize functions - Create unique numbers for each function - Use namespaces for DOLFIN output instead of class names - Temporary implementation of dof mapping for vector-valued elements - Make DOLFIN output format put entries into PETSc block - Change name of coefficient data: c%d[%d] -> c[%d][%d] - Change ordering of basis functions (one component at a time) - Add example poissonsystem.form - Modifications for new version of FIAT (FIAT-L) FIAT version 0.1 a factor 5 slower (no memoization) FIAT version 0.1.1 a little faster, only a factor 2 slower - Add setup.py script 0.1.3 (2004-12-06) ------------------ - Fix bug in DOLFIN format (missing value when zero) - Add output of reference tensor to LaTeX format - Make raw output format print data with full precision - Add component diagram - Change order of declaration of basis functions - Add new output format raw 0.1.2 (2004-11-17) ------------------ - Add command-line interface ffc - Add support for functions (coefficients) - Add support for constants - Allow multiple forms (left- and right-hand side) in same file - Add test examples: poisson.form, mass.form, navierstokes.form - Wrap FIAT to create vector-valued finite element spaces - Check ranks of operands - Clean up algebra, add base class Element - Add some documentation (class diagram) - Add support for LaTeX output 0.1.1-1 (2004-11-10) -------------------- - Add missing file declaration.py 0.1.1 (2004-11-10) ------------------ - Make output variable names configurable - Clean up DOLFIN code generation - Post-process form to create reference, geometry, and element tensors - Experimental support for general tensor-valued elements - Clean up and improve index reassignment - Use string formatting for generation of output - Change index ordering to access row-wise 0.1.0 (2004-10-22) ------------------ - First iteration of the FEniCS Form Compiler - Change boost::shared_ptr --> std::shared_ptr ChangeLog for UFC ================= UFC
was merged into FFC 2014-02-18. Below is the ChangeLog for UFC at the time of the merge. From this point onward, UFC version numbering restarts at the same version number as FFC and the rest of FEniCS. 2.3.0 (2014-01-07) ------------------ - Use std::vector<std::vector<std::size_t> > for topology data - Remove vertex coordinates from ufc::cell - Improve detection of compatible Python libraries - Add current swig version to the JIT compiled extension module - Remove dofmap::max_local_dimension() - Remove cell argument from dofmap::local_dimension() 2.2.0 (2013-03-24) ------------------ - Add new class ufc::point_integral - Use CMake to configure JIT compilation of forms - Generate UseUFC.cmake during configuration - Remove init_mesh(), init_cell(), init_mesh_finalize() - Remove ufc::mesh and add a vector of num_mesh_entities to global_dimension() and tabulate_dofs(). 2.1.0 (2013-01-07) ------------------ - Fix bug introduced by SWIG 2.0.5, which treated uint as Python long - Add optimization SWIG flags, fixing bug lp:987657 2.0.5 (2011-12-07) ------------------ - Improve configuration of libboost-math 2.0.4 (2011-11-28) ------------------ - Add boost_math_tr1 to library flags when JIT compiling an extension module 2.0.3 (2011-10-26) ------------------ - CMake config improvements 2.0.2 (2011-08-11) ------------------ - Some tweaks of installation 2.0.1 (2011-05-16) ------------------ - Make SWIG version >= 2.0 a requirement - Add possibility to set swig binary and swig path - Add missing const for map_{from,to}_reference_cell 2.0.0 (2011-02-23) ------------------ - Add quadrature version of tabulate_tensor - Add finite_element::map_{from,to}_reference_cell - Add finite_element::{topological,geometric}_dimension - Add dofmap::topological_dimension - Rename num_foo_integrals --> num_foo_domains - Rename dof_map --> dofmap - Add finite_element::create - Add dofmap::create 1.4.2 (2010-09-01) ------------------ - Move to CMake build system 1.4.1 (2010-07-01) ------------------ - Make functions introduced in UFC 1.1 mandatory (now pure virtual) - Update templates to allow constructor arguments in form classes 1.4.0 (2010-02-01) ------------------ - Changed behavior of create_foo_integral (returning 0 when integral is 0) - Bug fixes in installation 1.2.0 (2009-09-23) ------------------ - Add new function ufc::dof_map::max_local_dimension() - Change ufc::dof_map::local_dimension() to ufc::dof_map::local_dimension(const ufc::cell c) 1.1.2 (2009-04-07) ------------------ - Added configuration and building of python extension module to ufc_utils.build_ufc_module 1.1.1 (2009-02-20) ------------------ - The extension module is now not built, if the conditions for shared_ptr are not met - Added SCons build system - The swig generated extension module will be compiled with shared_ptr support if boost is found on system and swig is of version 1.3.35 or higher - The swig generated extension module is named ufc.py and exposes all ufc base classes to python - Added a swig generated extension module to ufc.
UFC now depends on swig - Changed name of the python utility module from "ufc" to "ufc_utils" 1.1.0 (2008-02-18) ------------------ - Add new function ufc::finite_element::evaluate_dofs - Add new function ufc::finite_element::evaluate_basis_all - Add new function ufc::finite_element::evaluate_basis_derivatives_all - Add new function ufc::dof_map::geometric_dimension - Add new function ufc::dof_map::num_entity_dofs - Add new function ufc::dof_map::tabulate_entity_dofs 1.0.0 (2007-06-17) ------------------ - Release of UFC 1.0 ffcx-0.9.0/INSTALL000066400000000000000000000013111470142666300135040ustar00rootroot00000000000000To install FFCx, type pip install --prefix=/path/to/install/ . This will install FFCx in the default Python path of your system, something like /path/to/install/lib/python3.6/site-packages/. To specify C++ compiler and/or compiler flags used for compiling UFC and JITing, set environment variables CXX, CXXFLAGS respectively before invoking setup.py. The installation script requires the Python module distutils, which for Debian users is available with the python-dev package. Other dependencies are listed in the file README. For detailed installation instructions, see the FFCx user manual which is available on http://fenicsproject.org/ and also in the subdirectory doc/manual/ of this source tree. ffcx-0.9.0/LICENSE000066400000000000000000000005361470142666300134700ustar00rootroot00000000000000The header file ufcx.h is released using the UNLICENSE. See UNLICENSE for the license text. ------------------------------------------------------------------------------ Other files, unless stated otherwise in their head, are licensed by GNU Lesser General Public License, version 3, or later. See COPYING and COPYING.LESSER for the license text. ffcx-0.9.0/MANIFEST.in000066400000000000000000000005121470142666300142130ustar00rootroot00000000000000include AUTHORS include COPYING include COPYING.LESSER include ChangeLog.rst include INSTALL include LICENSE include ffcx/codegeneration/ufcx.h recursive-include cmake * recursive-include demo * recursive-include doc * recursive-include ffcx *.in recursive-include libs * recursive-include test * global-exclude __pycache__ *.pyc ffcx-0.9.0/README.md000066400000000000000000000046231470142666300137430ustar00rootroot00000000000000# FFCx: The FEniCSx Form Compiler [![FFCx CI](https://github.com/FEniCS/ffcx/actions/workflows/pythonapp.yml/badge.svg)](https://github.com/FEniCS/ffcx/actions/workflows/pythonapp.yml) [![Spack install](https://github.com/FEniCS/ffcx/actions/workflows/spack.yml/badge.svg)](https://github.com/FEniCS/ffcx/actions/workflows/spack.yml) [![Coverage Status](https://coveralls.io/repos/github/FEniCS/ffcx/badge.svg?branch=main)](https://coveralls.io/github/FEniCS/ffcx?branch=main) FFCx is a new version of the FEniCS Form Compiler. It is being actively developed and is compatible with DOLFINx. FFCx is a compiler for finite element variational forms. From a high-level description of the form in the Unified Form Language (UFL), it generates efficient low-level C code that can be used to assemble the corresponding discrete operator (tensor). In particular, a bilinear form may be assembled into a matrix and a linear form may be assembled into a vector. FFCx may be used either from the command line (by invoking the `ffcx` command) or as a Python module (`import ffcx`). FFCx is part of the FEniCS Project. 
For more information, visit https://www.fenicsproject.org ## Installation To install FFCx from PyPI: ``` $ pip install fenics-ffcx ``` To install FFCx from the source directory: ``` $ pip install . ``` ## Documentation Documentation can be viewed at https://docs.fenicsproject.org/ffcx/main ## Interface file installation only FFCx provides the `ufcx.h` interface header for finite element kernels, used by DOLFINx. `ufcx.h` is installed by FFCx within the Python site packages, but it is sometimes helpful to install only the header file. This can be done using `cmake`: ``` $ cmake -B build-dir -S cmake/ $ cmake --build build-dir $ cmake --install build-dir ``` ## License This program is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this program. If not, see <https://www.gnu.org/licenses/>. ffcx-0.9.0/UNLICENSE000066400000000000000000000023261470142666300137320ustar00rootroot00000000000000The software in the file ufcx.h is free and unencumbered software released into the public domain. Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means. In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org> ffcx-0.9.0/_clang-format000066400000000000000000000053651470142666300151240ustar00rootroot00000000000000--- Language: Cpp # BasedOnStyle: LLVM AccessModifierOffset: -2 AlignAfterOpenBracket: Align AlignConsecutiveAssignments: false AlignConsecutiveDeclarations: false AlignEscapedNewlinesLeft: false AlignOperands: true AlignTrailingComments: true AllowAllParametersOfDeclarationOnNextLine: true AllowShortBlocksOnASingleLine: false AllowShortCaseLabelsOnASingleLine: false AllowShortFunctionsOnASingleLine: All AllowShortIfStatementsOnASingleLine: false AllowShortLoopsOnASingleLine: false AlwaysBreakAfterDefinitionReturnType: None AlwaysBreakAfterReturnType: None AlwaysBreakBeforeMultilineStrings: false AlwaysBreakTemplateDeclarations: true BinPackArguments: true BinPackParameters: true BraceWrapping: AfterClass: false AfterControlStatement: false AfterEnum: false AfterFunction: false AfterNamespace: false AfterObjCDeclaration: false AfterStruct: false AfterUnion: false BeforeCatch: false BeforeElse: false IndentBraces: false BreakBeforeBinaryOperators: All BreakBeforeBraces: Allman BreakBeforeTernaryOperators: true BreakConstructorInitializersBeforeComma: false BreakAfterJavaFieldAnnotations: false BreakStringLiterals: true ColumnLimit: 80 CommentPragmas: '^ IWYU pragma:' ConstructorInitializerAllOnOneLineOrOnePerLine: false ConstructorInitializerIndentWidth: 4 ContinuationIndentWidth: 4 Cpp11BracedListStyle: true DerivePointerAlignment: false DisableFormat: false ExperimentalAutoDetectBinPacking: false ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ] IncludeCategories: - Regex: '^"(llvm|llvm-c|clang|clang-c)/' Priority: 2 - Regex: '^(<|"(gtest|isl|json)/)' Priority: 3 - Regex: '.*' Priority: 1 IncludeIsMainRegex: '$' IndentCaseLabels: false IndentWidth: 2 IndentWrappedFunctionNames: false JavaScriptQuotes: Leave JavaScriptWrapImports: true KeepEmptyLinesAtTheStartOfBlocks: true MacroBlockBegin: '' MacroBlockEnd: '' MaxEmptyLinesToKeep: 1 NamespaceIndentation: None ObjCBlockIndentWidth: 2 ObjCSpaceAfterProperty: false ObjCSpaceBeforeProtocolList: true PenaltyBreakBeforeFirstCallParameter: 19 PenaltyBreakComment: 300 PenaltyBreakFirstLessLess: 120 PenaltyBreakString: 1000 PenaltyExcessCharacter: 1000000 PenaltyReturnTypeOnItsOwnLine: 60 PointerAlignment: Left ReflowComments: true SortIncludes: true SpaceAfterCStyleCast: false SpaceAfterTemplateKeyword: true SpaceBeforeAssignmentOperators: true SpaceBeforeParens: ControlStatements SpaceInEmptyParentheses: false SpacesBeforeTrailingComments: 1 SpacesInAngles: false SpacesInContainerLiterals: true SpacesInCStyleCastParentheses: false SpacesInParentheses: false SpacesInSquareBrackets: false Standard: Cpp11 TabWidth: 8 UseTab: Never ...
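As a hedged illustration of the INSTALL and README text above: the `ffcx` command and the `CXX`/`CXXFLAGS` variables are taken from those files, while the demo path `demo/Poisson.py` and the generated output names are assumptions made for this sketch, not guarantees.
```
# Optionally choose the C++ compiler and flags used for UFC compilation and
# JITing, as described in INSTALL (the values shown are only examples):
$ CXX=g++ CXXFLAGS="-O2" pip install .

# Compile a UFL form file with the ffcx command mentioned in the README;
# the output names Poisson.c and Poisson.h are assumed, not guaranteed here:
$ ffcx demo/Poisson.py
```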
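The cmake/ directory that follows installs both a CMake package and a pkg-config file (`ufcx.pc`) for the `ufcx.h` header. A minimal consumer sketch under stated assumptions: the install prefix's pkgconfig directory is on `PKG_CONFIG_PATH`, and `my_kernels.c` is a hypothetical source file that includes `ufcx.h`.
```
# Install the header-only ufcx package as in the README:
$ cmake -B build-dir -S cmake/ && cmake --build build-dir && cmake --install build-dir

# Query the include path recorded in the installed ufcx.pc, then compile:
$ pkg-config --cflags ufcx
$ cc $(pkg-config --cflags ufcx) -c my_kernels.c
```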
ffcx-0.9.0/cmake/000077500000000000000000000000001470142666300135375ustar00rootroot00000000000000ffcx-0.9.0/cmake/CMakeLists.txt000066400000000000000000000037251470142666300163060ustar00rootroot00000000000000cmake_minimum_required(VERSION 3.19) project(ufcx VERSION 0.9.0 DESCRIPTION "UFCx interface header for finite element kernels" LANGUAGES C HOMEPAGE_URL https://github.com/fenics/ffcx) include(GNUInstallDirs) file(SHA1 ${PROJECT_SOURCE_DIR}/../ffcx/codegeneration/ufcx.h UFCX_HASH) message("Test hash: ${UFCX_HASH}") add_library(${PROJECT_NAME} INTERFACE) target_compile_features(${PROJECT_NAME} INTERFACE c_std_17) add_library(${PROJECT_NAME}::${PROJECT_NAME} ALIAS ${PROJECT_NAME}) target_include_directories(${PROJECT_NAME} INTERFACE $ $) # Prepare and install CMake target/config files install(TARGETS ${PROJECT_NAME} EXPORT ${PROJECT_NAME}_Targets ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) include(CMakePackageConfigHelpers) write_basic_package_version_file("${PROJECT_NAME}ConfigVersion.cmake" VERSION ${PROJECT_VERSION} COMPATIBILITY AnyNewerVersion) configure_package_config_file("${PROJECT_NAME}Config.cmake.in" "${PROJECT_NAME}Config.cmake" INSTALL_DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/${PROJECT_NAME}/cmake) install(EXPORT ${PROJECT_NAME}_Targets FILE ${PROJECT_NAME}Targets.cmake NAMESPACE ${PROJECT_NAME}:: DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/${PROJECT_NAME}/cmake) install(FILES "${PROJECT_BINARY_DIR}/${PROJECT_NAME}Config.cmake" "${PROJECT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake" DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/${PROJECT_NAME}/cmake) # Install header file install(FILES ${PROJECT_SOURCE_DIR}/../ffcx/codegeneration/ufcx.h TYPE INCLUDE) # Configure and install pkgconfig file configure_file(ufcx.pc.in ufcx.pc @ONLY) install(FILES ${PROJECT_BINARY_DIR}/ufcx.pc DESTINATION ${CMAKE_INSTALL_DATADIR}/pkgconfig) ffcx-0.9.0/cmake/ufcx.pc.in000066400000000000000000000004211470142666300154320ustar00rootroot00000000000000prefix="@CMAKE_INSTALL_PREFIX@" exec_prefix="${prefix}" includedir=${prefix}/@CMAKE_INSTALL_INCLUDEDIR@ definitions=@ Name: @PROJECT_NAME@ Description: @CMAKE_PROJECT_DESCRIPTION@ URL: @CMAKE_PROJECT_HOMEPAGE_URL@ Version: @PROJECT_VERSION@ Cflags: -I"${includedir}" Libs:ffcx-0.9.0/cmake/ufcxConfig.cmake.in000066400000000000000000000002341470142666300172400ustar00rootroot00000000000000@PACKAGE_INIT@ set(UFCX_SIGNATURE @UFCX_HASH@) include("${CMAKE_CURRENT_LIST_DIR}/@PROJECT_NAME@Targets.cmake") check_required_components("@PROJECT_NAME@")ffcx-0.9.0/demo/000077500000000000000000000000001470142666300134035ustar00rootroot00000000000000ffcx-0.9.0/demo/BiharmonicHHJ.py000066400000000000000000000021141470142666300163600ustar00rootroot00000000000000# Copyright (C) 2016 Lizao Li """Biharmonic HHJ demo. The bilinear form a(u, v) and linear form L(v) for Biharmonic equation in Hellan-Herrmann-Johnson (HHJ) formulation.
""" import basix.ufl from ufl import ( Coefficient, FacetNormal, FunctionSpace, Mesh, TestFunctions, TrialFunctions, dot, dS, ds, dx, grad, inner, jump, ) HHJ = basix.ufl.element("HHJ", "triangle", 2) P = basix.ufl.element("P", "triangle", 3) mixed_element = basix.ufl.mixed_element([HHJ, P]) domain = Mesh(basix.ufl.element("P", "triangle", 1, shape=(2,))) mixed_space = FunctionSpace(domain, mixed_element) p_space = FunctionSpace(domain, P) (sigma, u) = TrialFunctions(mixed_space) (tau, v) = TestFunctions(mixed_space) f = Coefficient(p_space) def b(sigma, v): """The form b.""" n = FacetNormal(domain) return ( inner(sigma, grad(grad(v))) * dx - dot(dot(sigma("+"), n("+")), n("+")) * jump(grad(v), n) * dS - dot(dot(sigma, n), n) * dot(grad(v), n) * ds ) a = inner(sigma, tau) * dx - b(tau, u) + b(sigma, v) L = f * v * dx ffcx-0.9.0/demo/BiharmonicRegge.py000066400000000000000000000022511470142666300170020ustar00rootroot00000000000000# Copyright (C) 2016 Lizao Li """Biharmonic Regge demo. The bilinear form a(u, v) and linear form L(v) for Biharmonic equation in Regge formulation. """ import basix.ufl from ufl import ( Coefficient, FacetNormal, FunctionSpace, Identity, Mesh, TestFunctions, TrialFunctions, dot, dS, ds, dx, grad, inner, jump, tr, ) REG = basix.ufl.element("Regge", "tetrahedron", 1) P = basix.ufl.element("Lagrange", "tetrahedron", 2) mixed_element = basix.ufl.mixed_element([REG, P]) domain = Mesh(basix.ufl.element("P", "tetrahedron", 1, shape=(3,))) mixed_space = FunctionSpace(domain, mixed_element) p_space = FunctionSpace(domain, P) (sigma, u) = TrialFunctions(mixed_space) (tau, v) = TestFunctions(mixed_space) f = Coefficient(p_space) def S(mu): """The form S.""" return mu - Identity(3) * tr(mu) def b(mu, v): """The form b.""" n = FacetNormal(domain) return ( inner(S(mu), grad(grad(v))) * dx - dot(dot(S(mu("+")), n("+")), n("+")) * jump(grad(v), n) * dS - dot(dot(S(mu), n), n) * dot(grad(v), n) * ds ) a = inner(S(sigma), S(tau)) * dx - b(tau, u) + b(sigma, v) L = f * v * dx ffcx-0.9.0/demo/CellGeometry.py000066400000000000000000000017061470142666300163540ustar00rootroot00000000000000# Copyright (C) 2013 Martin S. Alnaes """Cell geometry demo. A functional M involving a bunch of cell geometry quantities. """ import basix.ufl from ufl import ( CellVolume, Circumradius, Coefficient, FacetArea, FacetNormal, FunctionSpace, Mesh, SpatialCoordinate, TrialFunction, ds, dx, ) from ufl.geometry import FacetEdgeVectors V = basix.ufl.element("P", "tetrahedron", 1) domain = Mesh(basix.ufl.element("P", "tetrahedron", 1, shape=(3,))) space = FunctionSpace(domain, V) u = Coefficient(space) # TODO: Add all geometry for all cell types to this and other demo # files, need for regression test. x = SpatialCoordinate(domain) n = FacetNormal(domain) vol = CellVolume(domain) rad = Circumradius(domain) area = FacetArea(domain) M = u * (x[0] * vol * rad) * dx + u * (x[0] * vol * rad * area) * ds # Test some obscure functionality fev = FacetEdgeVectors(domain) v = TrialFunction(space) L = fev[0, 0] * v * ds ffcx-0.9.0/demo/ComplexPoisson.py000066400000000000000000000025121470142666300167370ustar00rootroot00000000000000# Copyright (C) 2023 Chris Richardson # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see <https://www.gnu.org/licenses/>. """Complex Poisson demo. The bilinear form a(u, v) and linear form L(v) for Poisson's equation using bilinear elements on bilinear mesh geometry. """ import basix.ufl from ufl import Coefficient, FunctionSpace, Mesh, TestFunction, TrialFunction, dx, grad, inner coords = basix.ufl.element("P", "triangle", 2, shape=(2,)) mesh = Mesh(coords) dx = dx(mesh) element = basix.ufl.element("P", mesh.ufl_cell().cellname(), 2) space = FunctionSpace(mesh, element) u = TrialFunction(space) v = TestFunction(space) f = Coefficient(space) # Test literal complex number in form k = 3.213 + 1.023j a = k * inner(grad(u), grad(v)) * dx L = inner(k * f, v) * dx ffcx-0.9.0/demo/Components.py000066400000000000000000000022351470142666300161040ustar00rootroot00000000000000# Copyright (C) 2011 Garth N. Wells # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see <https://www.gnu.org/licenses/>. """Components demo. This example demonstrates how to create vectors component-wise. """ import basix.ufl from ufl import Coefficient, FunctionSpace, Mesh, TestFunction, as_vector, dx, inner element = basix.ufl.element("Lagrange", "tetrahedron", 1, shape=(3,)) domain = Mesh(element) space = FunctionSpace(domain, element) v = TestFunction(space) f = Coefficient(space) # Create vector v0 = as_vector([v[0], v[1], 0.0]) # Use created vector in linear form L = inner(f, v0) * dx ffcx-0.9.0/demo/Conditional.py000066400000000000000000000032021470142666300162170ustar00rootroot00000000000000# Copyright (C) 2010-2011 Kristian B. Oelgaard # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see <https://www.gnu.org/licenses/>. """Conditional demo. Illustration of how to use Conditional to define a source term.
""" import basix.ufl from ufl import ( And, Constant, FunctionSpace, Mesh, Not, Or, SpatialCoordinate, TestFunction, conditional, dx, ge, gt, inner, le, lt, ) element = basix.ufl.element("Lagrange", "triangle", 2) domain = Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2,))) space = FunctionSpace(domain, element) v = TestFunction(space) g = Constant(domain) x = SpatialCoordinate(domain) c0 = conditional(le((x[0] - 0.33) ** 2 + (x[1] - 0.67) ** 2, 0.015), -1.0, 5.0) c = conditional(le((x[0] - 0.33) ** 2 + (x[1] - 0.67) ** 2, 0.025), c0, 0.0) t0 = And(ge(x[0], 0.55), le(x[0], 0.95)) t1 = Or(lt(x[1], 0.05), gt(x[1], 0.45)) t2 = And(t0, Not(t1)) t = conditional(And(ge(x[1] - x[0] - 0.05 + 0.55, 0.0), t2), -1.0, 0.0) k = conditional(gt(1, 0), g, g + 1) f = c + t + k L = inner(f, v) * dx ffcx-0.9.0/demo/ExpressionInterpolation.py000066400000000000000000000042651470142666300206710ustar00rootroot00000000000000# Copyright (C) 2022 Jørgen S. Dokken # # This file is part of FFCx. # # FFC is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFC is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFC. If not, see <https://www.gnu.org/licenses/>. """Expression interpolation demo. Defines an Expression which evaluates several different functions at a set of interpolation points. """ import basix import basix.ufl from ufl import Coefficient, FunctionSpace, Mesh, grad # Define mesh cell = "triangle" v_el = basix.ufl.element("Lagrange", cell, 1, shape=(2,)) mesh = Mesh(v_el) # Define mixed function space el = basix.ufl.element("P", cell, 2) el_int = basix.ufl.element("Discontinuous Lagrange", cell, 1, shape=(2,)) me = basix.ufl.mixed_element([el, el_int]) V = FunctionSpace(mesh, me) u = Coefficient(V) # Define expressions on each sub-space du0 = grad(u[0]) du1 = grad(u[1]) # Define an expression using quadrature elements q_rule = "gauss_jacobi" q_degree = 3 q_el = basix.ufl.quadrature_element(cell, scheme=q_rule, degree=q_degree) Q = FunctionSpace(mesh, q_el) q = Coefficient(Q) powq = 3 * q**2 # Extract basix cell type b_cell = basix.CellType[cell] # Find quadrature points for quadrature element b_rule = basix.quadrature.string_to_type(q_rule) quadrature_points, _ = basix.quadrature.make_quadrature(b_cell, q_degree, rule=b_rule) # Get interpolation points for output space family = basix.finite_element.string_to_family("Lagrange", cell) b_element = basix.create_element( family, b_cell, 4, basix.LagrangeVariant.gll_warped, discontinuous=True ) interpolation_points = b_element.points # Create expressions that can be used for interpolation expressions = [(du0, interpolation_points), (du1, interpolation_points), (powq, quadrature_points)] ffcx-0.9.0/demo/FacetIntegrals.py000066400000000000000000000025441470142666300166550ustar00rootroot00000000000000# Copyright (C) 2009-2010 Anders Logg # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version.
# # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see <https://www.gnu.org/licenses/>. """Facet integrals demo. Simple example of a form defined over exterior and interior facets. """ import basix.ufl from ufl import ( FacetNormal, FunctionSpace, Mesh, TestFunction, TrialFunction, avg, dS, ds, grad, inner, jump, ) element = basix.ufl.element("Discontinuous Lagrange", "triangle", 1) domain = Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2,))) space = FunctionSpace(domain, element) u = TrialFunction(space) v = TestFunction(space) n = FacetNormal(domain) a = ( inner(u, v) * ds + inner(u("+"), v("-")) * dS + inner(jump(u, n), avg(grad(v))) * dS + inner(avg(grad(u)), jump(v, n)) * dS ) ffcx-0.9.0/demo/FacetRestrictionAD.py000066400000000000000000000023601470142666300174330ustar00rootroot00000000000000# Copyright (C) 2010 Garth N. Wells # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see <https://www.gnu.org/licenses/>. """Facet restriction demo.""" import basix.ufl from ufl import ( Coefficient, FunctionSpace, Mesh, TestFunction, TrialFunction, avg, derivative, dS, dx, grad, inner, ) element = basix.ufl.element("Discontinuous Lagrange", "triangle", 1) domain = Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2,))) space = FunctionSpace(domain, element) v = TestFunction(space) w = Coefficient(space) L = inner(grad(w), grad(v)) * dx - inner(avg(grad(w)), avg(grad(v))) * dS u = TrialFunction(space) a = derivative(L, w, u) ffcx-0.9.0/demo/HyperElasticity.py000066400000000000000000000047771470142666300171060ustar00rootroot00000000000000# Author: Martin Sandve Alnes # Date: 2008-12-22 # Modified by Garth N.
Wells, 2009 """Hyper-elasticity demo.""" import basix.ufl from ufl import ( Coefficient, Constant, FacetNormal, FunctionSpace, Identity, Mesh, SpatialCoordinate, TestFunction, TrialFunction, derivative, det, diff, ds, dx, exp, grad, inner, inv, tetrahedron, tr, variable, ) # Cell and its properties cell = tetrahedron d = 3 # Elements u_element = basix.ufl.element("P", cell.cellname(), 2, shape=(3,)) p_element = basix.ufl.element("P", cell.cellname(), 1) A_element = basix.ufl.element("P", cell.cellname(), 1, shape=(3, 3)) # Spaces domain = Mesh(basix.ufl.element("Lagrange", cell.cellname(), 1, shape=(3,))) u_space = FunctionSpace(domain, u_element) p_space = FunctionSpace(domain, p_element) A_space = FunctionSpace(domain, A_element) # Cell properties N = FacetNormal(domain) x = SpatialCoordinate(domain) # Test and trial functions v = TestFunction(u_space) w = TrialFunction(u_space) # Displacement at current and two previous timesteps u = Coefficient(u_space) up = Coefficient(u_space) upp = Coefficient(u_space) # Time parameters dt = Constant(domain) # Fiber field A = Coefficient(A_space) # External forces T = Coefficient(u_space) p0 = Coefficient(p_space) # Material parameters FIXME rho = Constant(domain) K = Constant(domain) c00 = Constant(domain) c11 = Constant(domain) c22 = Constant(domain) # Deformation gradient Ident = Identity(d) F = Ident + grad(u) F = variable(F) Finv = inv(F) J = det(F) # Left Cauchy-Green deformation tensor B = F * F.T I1_B = tr(B) I2_B = (I1_B**2 - tr(B * B)) / 2 I3_B = J**2 # Right Cauchy-Green deformation tensor C = F.T * F I1_C = tr(C) I2_C = (I1_C**2 - tr(C * C)) / 2 I3_C = J**2 # Green strain tensor E = (C - Ident) / 2 # Mapping of strain in fiber directions Ef = A * E * A.T # Strain energy function W(Q(Ef)) Q = ( c00 * Ef[0, 0] ** 2 + c11 * Ef[1, 1] ** 2 + c22 * Ef[2, 2] ** 2 ) # FIXME: insert some simple law here W = (K / 2) * (exp(Q) - 1) # + p stuff # First Piola-Kirchhoff stress tensor P = diff(W, F) # Acceleration term discretized with finite differences k = dt / rho acc = u - 2 * up + upp # Residual equation # FIXME: Can contain errors, not tested! a_F = ( inner(acc, v) * dx + k * inner(P, grad(v)) * dx - k * inner(J * Finv * T, v) * ds(0) - k * inner(J * Finv * p0 * N, v) * ds(1) ) # Jacobian matrix of residual equation a_J = derivative(a_F, u, w) # Export forms forms = [a_F, a_J] ffcx-0.9.0/demo/MassAction.py000066400000000000000000000013141470142666300160150ustar00rootroot00000000000000# Copyright (C) 2023 Igor A. Baratta # # This file is part of FFCx. (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Mass action demo.""" import basix import basix.ufl import ufl P = 3 cell_type = basix.CellType.hexahedron # create element with tensor product order element = basix.ufl.wrap_element( basix.create_tp_element(basix.ElementFamily.P, cell_type, P, basix.LagrangeVariant.gll_warped) ) coords = basix.ufl.element(basix.ElementFamily.P, cell_type, 1, shape=(3,)) mesh = ufl.Mesh(coords) V = ufl.FunctionSpace(mesh, element) x = ufl.SpatialCoordinate(mesh) v = ufl.TestFunction(V) u = ufl.TrialFunction(V) a = ufl.inner(u, v) * ufl.dx w = ufl.Coefficient(V) L = ufl.action(a, w) ffcx-0.9.0/demo/MassDG0.py000066400000000000000000000021071470142666300151530ustar00rootroot00000000000000# Copyright (C) 2021 Igor Baratta # # This file is part of FFCx.
# # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see <https://www.gnu.org/licenses/>. """DG mass matrix demo. The bilinear form for a mass matrix. """ import basix.ufl from ufl import FunctionSpace, Mesh, TestFunction, TrialFunction, dx, inner element = basix.ufl.element("DG", "tetrahedron", 0) domain = Mesh(basix.ufl.element("Lagrange", "tetrahedron", 1, shape=(3,))) space = FunctionSpace(domain, element) v = TestFunction(space) u = TrialFunction(space) a = inner(u, v) * dx ffcx-0.9.0/demo/MassHcurl_2D_1.py000066400000000000000000000020471470142666300164260ustar00rootroot00000000000000# Copyright (C) 2004-2010 Anders Logg # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see <https://www.gnu.org/licenses/>. """H(curl) mass matrix demo.""" import basix.ufl from ufl import FunctionSpace, Mesh, TestFunction, TrialFunction, dx, inner element = basix.ufl.element("N1curl", "triangle", 1) domain = Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2,))) space = FunctionSpace(domain, element) v = TestFunction(space) u = TrialFunction(space) a = inner(u, v) * dx ffcx-0.9.0/demo/MassHdiv_2D_1.py000066400000000000000000000020361470142666300162410ustar00rootroot00000000000000# Copyright (C) 2010 Anders Logg # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see <https://www.gnu.org/licenses/>. """H(div) mass matrix demo.""" import basix.ufl from ufl import FunctionSpace, Mesh, TestFunction, TrialFunction, dx, inner element = basix.ufl.element("BDM", "triangle", 1) domain = Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2,))) space = FunctionSpace(domain, element) v = TestFunction(space) u = TrialFunction(space) a = inner(u, v) * dx ffcx-0.9.0/demo/MathFunctions.py000066400000000000000000000037601470142666300165450ustar00rootroot00000000000000# Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFCx.
# # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see <https://www.gnu.org/licenses/>. """Math function demo. Test all algebra operators on Coefficients. """ import basix.ufl from ufl import ( Coefficient, FunctionSpace, Mesh, acos, asin, atan, bessel_J, bessel_Y, cos, dx, erf, exp, ln, sin, sqrt, tan, ) element = basix.ufl.element("Lagrange", "triangle", 1) domain = Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2,))) space = FunctionSpace(domain, element) c0 = Coefficient(space) c1 = Coefficient(space) s0 = 3 * c0 - c1 p0 = c0 * c1 f0 = c0 / c1 integrand = ( sqrt(c0) + sqrt(s0) + sqrt(p0) + sqrt(f0) + exp(c0) + exp(s0) + exp(p0) + exp(f0) + ln(c0) + ln(s0) + ln(p0) + ln(f0) + cos(c0) + cos(s0) + cos(p0) + cos(f0) + sin(c0) + sin(s0) + sin(p0) + sin(f0) + tan(c0) + tan(s0) + tan(p0) + tan(f0) + acos(c0) + acos(s0) + acos(p0) + acos(f0) + asin(c0) + asin(s0) + asin(p0) + asin(f0) + atan(c0) + atan(s0) + atan(p0) + atan(f0) + erf(c0) + erf(s0) + erf(p0) + erf(f0) + bessel_J(1, c0) + bessel_J(1, s0) + bessel_J(0, p0) + bessel_J(0, f0) + bessel_Y(1, c0) + bessel_Y(1, s0) + bessel_Y(0, p0) + bessel_Y(0, f0) ) a = integrand * dx ffcx-0.9.0/demo/MetaData.py000066400000000000000000000031701470142666300154360ustar00rootroot00000000000000# Copyright (C) 2009 Kristian B. Oelgaard # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see <https://www.gnu.org/licenses/>. """Metadata demo. Test form for metadata. """ import basix.ufl from ufl import ( Coefficient, Constant, FunctionSpace, Mesh, TestFunction, TrialFunction, dx, grad, inner, ) element = basix.ufl.element("Lagrange", "triangle", 1) vector_element = basix.ufl.element("Lagrange", "triangle", 1, shape=(2,)) domain = Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2,))) space = FunctionSpace(domain, element) vector_space = FunctionSpace(domain, vector_element) u = TrialFunction(space) v = TestFunction(space) c = Coefficient(vector_space) c2 = Constant(domain) # Terms on the same subdomain using different quadrature degree a = ( inner(grad(u), grad(v)) * dx(0, degree=8) + inner(c, c) * inner(grad(u), grad(v)) * dx(1, degree=4) + inner(c, c) * inner(grad(u), grad(v)) * dx(1, degree=2) + inner(grad(u), grad(v)) * dx(1, degree=-1) ) L = inner(c2, v) * dx(0, metadata={"precision": 1}) ffcx-0.9.0/demo/Mini.py000066400000000000000000000030451470142666300146530ustar00rootroot00000000000000# Copyright (C) 2010 Marie E. Rognes # # This file is part of FFCx.
# # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see . """Mini element demo. Illustration of vector sum of elements (EnrichedElement): The bilinear form a(u, v) for the Stokes equations using a mixed formulation involving the Mini element. The velocity element is composed of a P1 element augmented by the cubic bubble function. """ import basix.ufl from ufl import FunctionSpace, Mesh, TestFunctions, TrialFunctions, div, dx, grad, inner P1 = basix.ufl.element("Lagrange", "triangle", 1) B = basix.ufl.element("Bubble", "triangle", 3) V = basix.ufl.blocked_element(basix.ufl.enriched_element([P1, B]), shape=(2,)) Q = basix.ufl.element("P", "triangle", 1) domain = Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2,))) Mini = FunctionSpace(domain, basix.ufl.mixed_element([V, Q])) (u, p) = TrialFunctions(Mini) (v, q) = TestFunctions(Mini) a = (inner(grad(u), grad(v)) - inner(p, div(v)) + inner(div(u), q)) * dx ffcx-0.9.0/demo/MixedCoefficient.py000066400000000000000000000022551470142666300171660ustar00rootroot00000000000000# Copyright (C) 2016 Miklós Homolya # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see . """Mixed coefficient demo.""" import basix.ufl from ufl import Coefficients, FunctionSpace, Mesh, dot, dS, dx DG = basix.ufl.element("DG", "triangle", 0, shape=(2,)) CG = basix.ufl.element("Lagrange", "triangle", 2) RT = basix.ufl.element("RT", "triangle", 3) element = basix.ufl.mixed_element([DG, CG, RT]) domain = Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2,))) space = FunctionSpace(domain, element) f, g, h = Coefficients(space) forms = [dot(f("+"), h("-")) * dS + g * dx] ffcx-0.9.0/demo/MixedGradient.py000066400000000000000000000007521470142666300165050ustar00rootroot00000000000000"""Mixed gradient demo.""" import basix.ufl from ufl import FunctionSpace, Mesh, TestFunctions, TrialFunctions, ds, grad, inner element1 = basix.ufl.element("DG", "triangle", 1) element2 = basix.ufl.element("DGT", "triangle", 1) element = basix.ufl.mixed_element([element1, element2]) domain = Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2,))) space = FunctionSpace(domain, element) u = TrialFunctions(space)[0] v = TestFunctions(space)[0] a = inner(grad(u), grad(v)) * ds ffcx-0.9.0/demo/MixedPoissonDual.py000066400000000000000000000027541470142666300172140ustar00rootroot00000000000000# Copyright (C) 2014 Jan Blechta # # This file is part of FFCx. 
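# ---------------------------------------------------------------------------
# A minimal sketch (not part of the original demos) of the mixed-element
# pattern shared by Mini.py and MixedCoefficient.py above: build a
# basix.ufl mixed element, wrap it in a FunctionSpace, then split the
# arguments:
#
#   import basix.ufl
#   from ufl import FunctionSpace, Mesh, TrialFunctions
#
#   V = basix.ufl.element("Lagrange", "triangle", 2, shape=(2,))
#   Q = basix.ufl.element("Lagrange", "triangle", 1)
#   domain = Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2,)))
#   W = FunctionSpace(domain, basix.ufl.mixed_element([V, Q]))
#   u, p = TrialFunctions(W)
# ---------------------------------------------------------------------------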
# # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see . """Mixed Poisson dual demo. The bilinear form a(u, v) and linear form L(v) for a two-field (mixed) formulation of Poisson's equation. """ import basix.ufl from ufl import Coefficient, FunctionSpace, Mesh, TestFunctions, TrialFunctions, ds, dx, grad, inner DRT = basix.ufl.element("Discontinuous RT", "triangle", 2) P = basix.ufl.element("P", "triangle", 3) W = basix.ufl.mixed_element([DRT, P]) domain = Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2,))) space = FunctionSpace(domain, W) (sigma, u) = TrialFunctions(space) (tau, v) = TestFunctions(space) P1 = basix.ufl.element("P", "triangle", 1) space = FunctionSpace(domain, P1) f = Coefficient(space) g = Coefficient(space) a = (inner(sigma, tau) + inner(grad(u), tau) + inner(sigma, grad(v))) * dx L = -inner(f, v) * dx - inner(g, v) * ds ffcx-0.9.0/demo/Normals.py000066400000000000000000000023561470142666300153760ustar00rootroot00000000000000# Copyright (C) 2009 Peter Brune # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see . """Normals demo. This example demonstrates how to use the facet normals. Merely project the normal onto a vector section. """ import basix.ufl from ufl import FacetNormal, FunctionSpace, Mesh, TestFunction, TrialFunction, ds, inner, triangle cell = triangle element = basix.ufl.element("Lagrange", cell.cellname(), 1, shape=(2,)) domain = Mesh(basix.ufl.element("Lagrange", cell.cellname(), 1, shape=(2,))) space = FunctionSpace(domain, element) n = FacetNormal(domain) v = TrialFunction(space) u = TestFunction(space) a = inner(v, u) * ds L = inner(n, u) * ds ffcx-0.9.0/demo/Poisson1D.py000066400000000000000000000022641470142666300156000ustar00rootroot00000000000000# Copyright (C) 2004-2007 Anders Logg # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see . """1D Poisson demo.
The bilinear form a(u, v) and linear form L(v) for Poisson's equation. """ import basix.ufl from ufl import Coefficient, FunctionSpace, Mesh, TestFunction, TrialFunction, dx, grad, inner element = basix.ufl.element("Lagrange", "interval", 1) domain = Mesh(basix.ufl.element("Lagrange", "interval", 1, shape=(1,))) space = FunctionSpace(domain, element) u = TrialFunction(space) v = TestFunction(space) f = Coefficient(space) a = inner(grad(u), grad(v)) * dx L = inner(f, v) * dx ffcx-0.9.0/demo/PoissonQuad.py000066400000000000000000000024111470142666300162200ustar00rootroot00000000000000# Copyright (C) 2016 Jan Blechta # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see . """Quadrilateral Poisson demo. The bilinear form a(u, v) and linear form L(v) for Poisson's equation using bilinear elements on bilinear mesh geometry. """ import basix.ufl from ufl import Coefficient, FunctionSpace, Mesh, TestFunction, TrialFunction, dx, grad, inner coords = basix.ufl.element("P", "triangle", 2, shape=(2,)) mesh = Mesh(coords) dx = dx(mesh) element = basix.ufl.element("P", mesh.ufl_cell().cellname(), 2) space = FunctionSpace(mesh, element) u = TrialFunction(space) v = TestFunction(space) f = Coefficient(space) a = inner(grad(u), grad(v)) * dx L = inner(f, v) * dx ffcx-0.9.0/demo/ProjectionManifold.py000066400000000000000000000025101470142666300175410ustar00rootroot00000000000000# Copyright (C) 2012 Marie E. Rognes and David Ham # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see . """Projection manifold demo. This demo illustrates use of finite element spaces defined over simplicies embedded in higher dimensions. """ import basix.ufl from ufl import FunctionSpace, Mesh, TestFunctions, TrialFunctions, div, dx, inner # Define element over this domain V = basix.ufl.element("RT", "triangle", 1) Q = basix.ufl.element("DG", "triangle", 0) element = basix.ufl.mixed_element([V, Q]) domain = Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(3,))) space = FunctionSpace(domain, element) (u, p) = TrialFunctions(space) (v, q) = TestFunctions(space) a = (inner(u, v) + inner(div(u), q) + inner(p, div(v))) * dx ffcx-0.9.0/demo/ReactionDiffusion.py000066400000000000000000000023711470142666300173730ustar00rootroot00000000000000# Copyright (C) 2009 Anders Logg # # This file is part of FFCx. 
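# ---------------------------------------------------------------------------
# A minimal sketch (not part of the original demos): ProjectionManifold.py
# above obtains its manifold setting purely from the geometry, pairing a
# "triangle" reference cell with a three-component coordinate element. The
# same device yields, e.g., a curve embedded in the plane:
#
#   import basix.ufl
#   from ufl import Mesh
#   curve = Mesh(basix.ufl.element("Lagrange", "interval", 1, shape=(2,)))
# ---------------------------------------------------------------------------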
# # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see . """Reaction-diffusion demo. The bilinear form a(u, v) and linear form L(v) for a simple reaction-diffusion equation using simplified tuple notation. """ import basix.ufl from ufl import Coefficient, FunctionSpace, Mesh, TestFunction, TrialFunction, dx, grad, inner element = basix.ufl.element("Lagrange", "triangle", 1) domain = Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2,))) space = FunctionSpace(domain, element) u = TrialFunction(space) v = TestFunction(space) f = Coefficient(space) a = (inner(grad(u), grad(v)) + inner(u, v)) * dx L = inner(f, v) * dx ffcx-0.9.0/demo/SpatialCoordinates.py000066400000000000000000000027271470142666300175550ustar00rootroot00000000000000# Copyright (C) 2010 Kristian B. Oelgaard # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see . """Spatial coordinates demo. The bilinear form a(u, v) and linear form L(v) for Poisson's equation where spatial coordinates are used to define the source and boundary flux terms. """ import basix.ufl from ufl import ( FunctionSpace, Mesh, SpatialCoordinate, TestFunction, TrialFunction, ds, dx, exp, grad, inner, sin, ) element = basix.ufl.element("Lagrange", "triangle", 2) domain = Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2,))) space = FunctionSpace(domain, element) u = TrialFunction(space) v = TestFunction(space) x = SpatialCoordinate(domain) d_x = x[0] - 0.5 d_y = x[1] - 0.5 f = 10.0 * exp(-(d_x * d_x + d_y * d_y) / 0.02) g = sin(5.0 * x[0]) a = inner(grad(u), grad(v)) * dx L = inner(f, v) * dx + inner(g, v) * ds ffcx-0.9.0/demo/StabilisedStokes.py000066400000000000000000000032521470142666300172330ustar00rootroot00000000000000# Copyright (c) 2005-2007 Anders Logg # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see . """Stabilised Stokes demo. 
The bilinear form a(u, v) and Linear form L(v) for the Stokes equations using a mixed formulation (equal-order stabilized). """ import basix.ufl from ufl import ( Coefficient, FunctionSpace, Mesh, TestFunctions, TrialFunctions, div, dot, dx, grad, inner, ) vector = basix.ufl.element("Lagrange", "triangle", 1, shape=(2,)) scalar = basix.ufl.element("Lagrange", "triangle", 1) system = basix.ufl.mixed_element([vector, scalar]) domain = Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2,))) system_space = FunctionSpace(domain, system) scalar_space = FunctionSpace(domain, scalar) vector_space = FunctionSpace(domain, vector) (u, p) = TrialFunctions(system_space) (v, q) = TestFunctions(system_space) f = Coefficient(vector_space) h = Coefficient(scalar_space) beta = 0.2 delta = beta * h * h a = (inner(grad(u), grad(v)) - div(v) * p + div(u) * q + delta * dot(grad(p), grad(q))) * dx L = dot(f, v + delta * grad(q)) * dx ffcx-0.9.0/demo/Symmetry.py000066400000000000000000000006001470142666300156020ustar00rootroot00000000000000"""Symmetry demo.""" import basix.ufl from ufl import FunctionSpace, Mesh, TestFunction, TrialFunction, dx, grad, inner P1 = basix.ufl.element("P", "triangle", 1, shape=(2, 2), symmetry=True) domain = Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2,))) space = FunctionSpace(domain, P1) u = TrialFunction(space) v = TestFunction(space) a = inner(grad(u), grad(v)) * dx ffcx-0.9.0/demo/TraceElement.py000066400000000000000000000020001470142666300163150ustar00rootroot00000000000000# Copyright (C) 2015 Marie E. Rognes # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see . """Trace element demo.""" import basix.ufl from ufl import FunctionSpace, Mesh, TestFunction, avg, dS, ds element = basix.ufl.element("HDiv Trace", "triangle", 0) domain = Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2,))) space = FunctionSpace(domain, element) v = TestFunction(space) L = v * ds + avg(v) * dS ffcx-0.9.0/demo/VectorConstant.py000066400000000000000000000026721470142666300167400ustar00rootroot00000000000000# Copyright (C) 2016 Jan Blechta # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see . """Vector constant demo. The bilinear form a(u, v) and linear form L(v) for Poisson's equation using bilinear elements on bilinear mesh geometry. 
""" import basix.ufl from ufl import ( Coefficient, Constant, FunctionSpace, Mesh, TestFunction, TrialFunction, dx, grad, inner, ) coords = basix.ufl.element("P", "triangle", 2, shape=(2,)) mesh = Mesh(coords) dx = dx(mesh) element = basix.ufl.element("P", mesh.ufl_cell().cellname(), 2) space = FunctionSpace(mesh, element) u = TrialFunction(space) v = TestFunction(space) f = Coefficient(space) L = inner(f, v) * dx mu = Constant(mesh, shape=(3,)) theta = -(mu[1] - 2) / mu[0] - (2 * (2 * mu[0] - 2) * (mu[0] - 1)) / (mu[0] * (mu[1] - 2)) a = theta * inner(grad(u), grad(v)) * dx ffcx-0.9.0/demo/VectorPoisson.py000066400000000000000000000022401470142666300165700ustar00rootroot00000000000000# Copyright (C) 2010 Anders Logg # # This file is part of FFCx. # # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see . """Vector Poisson demo. The bilinear form a(u, v) and linear form L(v) for the vector-valued Poisson's equation. """ import basix.ufl from ufl import Coefficient, FunctionSpace, Mesh, TestFunction, TrialFunction, dx, grad, inner element = basix.ufl.element("Lagrange", "triangle", 1, shape=(2,)) domain = Mesh(element) space = FunctionSpace(domain, element) u = TrialFunction(space) v = TestFunction(space) f = Coefficient(space) a = inner(grad(u), grad(v)) * dx L = inner(f, v) * dx ffcx-0.9.0/demo/test_demos.py000066400000000000000000000047161470142666300161330ustar00rootroot00000000000000"""Test demos.""" import os import sys import pytest demo_dir = os.path.dirname(os.path.realpath(__file__)) ufl_files = [] for file in os.listdir(demo_dir): if file.endswith(".py") and not file == "test_demos.py": ufl_files.append(file[:-3]) @pytest.mark.parametrize("file", ufl_files) @pytest.mark.parametrize("scalar_type", ["float64", "float32", "complex128", "complex64"]) def test_demo(file, scalar_type): """Test a demo.""" if sys.platform.startswith("win32") and "complex" in scalar_type: # Skip complex demos on win32 pytest.skip(reason="_Complex not supported on Windows") if file in [ "MixedGradient", "TraceElement", # HDiv Trace "MixedElasticity", # VectorElement of BDM "RestrictedElement", "_TensorProductElement", ]: # Skip demos that use elements not yet implemented in Basix pytest.skip(reason="Element not yet implemented in Basix") if "complex" in scalar_type and file in [ "BiharmonicHHJ", "BiharmonicRegge", "StabilisedStokes", ]: # Skip demos that are not implemented for complex scalars pytest.skip(reason="Not implemented for complex types") elif "Complex" in file and scalar_type in ["float64", "float32"]: # Skip demos that are only implemented for complex scalars pytest.skip(reason="Not implemented for real types") if sys.platform.startswith("win32"): opts = f"--scalar_type {scalar_type}" extra_flags = "/std:c17" assert os.system(f"cd {demo_dir} && ffcx {opts} {file}.py") == 0 assert ( os.system( f"cd {demo_dir} && " f'cl.exe /I "../ffcx/codegeneration" {extra_flags} /c {file}.c' ) ) == 0 assert ( os.system( f"cd {demo_dir} && " f'clang-cl.exe /I 
"../ffcx/codegeneration" {extra_flags} /c {file}.c' ) ) == 0 else: cc = os.environ.get("CC", "cc") opts = f"--scalar_type {scalar_type}" extra_flags = ( "-std=c17 -Wunused-variable -Werror -fPIC -Wno-error=implicit-function-declaration" ) assert os.system(f"cd {demo_dir} && ffcx {opts} {file}.py") == 0 assert ( os.system( f"cd {demo_dir} && " f"{cc} -I../ffcx/codegeneration " f"{extra_flags} " f"-c {file}.c" ) == 0 ) ffcx-0.9.0/doc/000077500000000000000000000000001470142666300132245ustar00rootroot00000000000000ffcx-0.9.0/doc/Makefile000066400000000000000000000011541470142666300146650ustar00rootroot00000000000000# Minimal makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build SPHINXPROJ = FEniCSFormCompilerX SOURCEDIR = source BUILDDIR = build # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)ffcx-0.9.0/doc/source/000077500000000000000000000000001470142666300145245ustar00rootroot00000000000000ffcx-0.9.0/doc/source/conf.py000066400000000000000000000132651470142666300160320ustar00rootroot00000000000000"""Configuration file for the Sphinx documentation builder.""" # This file does only contain a selection of the most common options. For a # full list see the documentation: # http://www.sphinx-doc.org/en/stable/config # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # import os # import sys # sys.path.insert(0, os.path.abspath('.')) import datetime import ffcx # -- Project information ----------------------------------------------------- project = "FEniCS Form Compiler X" now = datetime.datetime.now() date = now.date() copyright = f"{date.year}, FEniCS Project" author = "FEniCS Project" # The short X.Y version version = ffcx.__version__ # The full version, including alpha/beta/rc tags release = ffcx.__version__ # -- General configuration --------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.autosummary", "sphinx.ext.doctest", "sphinx.ext.todo", "sphinx.ext.coverage", "sphinx.ext.mathjax", "sphinx.ext.napoleon", "sphinx.ext.viewcode", ] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = ".rst" # The master toctree document. master_doc = "index" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. 
language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path . exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # # html_theme = 'alabaster' html_theme = "sphinx_rtd_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # The default sidebars (for documents that don't match any pattern) are # defined by theme itself. Builtin themes are using these templates by # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', # 'searchbox.html']``. # # html_sidebars = {} # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. htmlhelp_basename = "FEniCSFormCompilerXdoc" # -- Options for LaTeX output ------------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ( master_doc, "FEniCSFormCompilerX.tex", "FEniCS Form Compiler X Documentation", "FEniCS Project", "manual", ), ] # -- Options for manual page output ------------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, "fenicsformcompilerx", "FEniCS Form Compiler X Documentation", [author], 1) ] # -- Options for Texinfo output ---------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( master_doc, "FEniCSFormCompilerX", "FEniCS Form Compiler X Documentation", author, "FEniCSFormCompilerX", "One line description of project.", "Miscellaneous", ), ] # -- Extension configuration ------------------------------------------------- # -- Options for todo extension ---------------------------------------------- # If true, `todo` and `todoList` produce output, else they produce nothing. 
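# A hedged note (not in the original conf.py): together with
# autosummary_generate = True below, the autosummary directive in
# doc/source/index.rst (later in this archive) is what creates the stub
# pages under _autogenerated/ for each listed module, e.g.:
#
#   .. autosummary::
#      :toctree: _autogenerated
#
#      ffcx.analysis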
todo_include_todos = True autodoc_default_options = { "members": True, "show-inheritance": True, "imported-members": True, "undoc-members": True, } autosummary_generate = True autoclass_content = "both" autodoc_default_flags = ["members", "show-inheritance"] napoleon_numpy_docstring = True napoleon_google_docstring = True ffcx-0.9.0/doc/source/index.rst000066400000000000000000000012011470142666300163570ustar00rootroot00000000000000FEniCS Form Compiler 'X' documentation ====================================== The is an experimental version of the FEniCS Form Compiler. It is developed at https://github.com/FEniCS/ffcx. .. toctree:: :maxdepth: 2 :caption: Contents: API reference ============= .. autosummary:: :toctree: _autogenerated ffcx ffcx.__main__ ffcx.analysis ffcx.compiler ffcx.element_interface ffcx.formatting ffcx.main ffcx.naming ffcx.codegeneration ffcx.options ffcx.ir.representation ffcx.ir.representationutils Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` ffcx-0.9.0/ffcx/000077500000000000000000000000001470142666300134055ustar00rootroot00000000000000ffcx-0.9.0/ffcx/__init__.py000066400000000000000000000007651470142666300155260ustar00rootroot00000000000000# Copyright (C) 2009-2018 FEniCS Project # # This file is part of FFCx. (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """FEniCS Form Compiler (FFCx). FFCx compiles finite element variational forms into C code. """ import importlib.metadata import logging # Import default options from ffcx.options import get_options # noqa: F401 __version__ = importlib.metadata.version("fenics-ffcx") logger = logging.getLogger("ffcx") logging.captureWarnings(capture=True) ffcx-0.9.0/ffcx/__main__.py000066400000000000000000000004601470142666300154770ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (C) 2017-2017 Martin Sandve Alnæs # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Run ffcx on a UFL file.""" from ffcx.main import main if __name__ == "__main__": import sys sys.exit(main()) ffcx-0.9.0/ffcx/analysis.py000066400000000000000000000235561470142666300156150ustar00rootroot00000000000000# Copyright (C) 2007-2020 Anders Logg, Martin Alnaes, Kristian B. Oelgaard, # Michal Habera and others # # This file is part of FFCx. (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Compiler stage 1: Analysis. This module implements the analysis/preprocessing of variational forms, including automatic selection of elements, degrees and form representation type. """ from __future__ import annotations import logging import typing import basix.ufl import numpy as np import numpy.typing as npt import ufl logger = logging.getLogger("ffcx") class UFLData(typing.NamedTuple): """UFL data.""" # Tuple of ufl form data form_data: tuple[ufl.algorithms.formdata.FormData, ...] 
# List of unique elements unique_elements: list[basix.ufl._ElementBase] # Lookup table from each unique element to its index in `unique_elements` element_numbers: dict[basix.ufl._ElementBase, int] # List of unique coordinate elements unique_coordinate_elements: list[basix.ufl._ElementBase] # List of ufl Expressions as tuples (expression, points, original_expression) expressions: list[tuple[ufl.core.expr.Expr, npt.NDArray[np.float64], ufl.core.expr.Expr]] def analyze_ufl_objects( ufl_objects: list[ ufl.form.Form | ufl.AbstractFiniteElement | ufl.Mesh | tuple[ufl.core.expr.Expr, npt.NDArray[np.floating]] ], scalar_type: npt.DTypeLike, ) -> UFLData: """Analyze ufl object(s). Args: ufl_objects: UFL objects scalar_type: Scalar type that should be used for the analysis Returns: A data structure holding: form_datas: Form_data objects unique_elements: Unique elements across all forms and expressions element_numbers: Mapping to unique numbers for all elements unique_coordinate_elements: Unique coordinate elements across all forms and expressions expressions: List of all expressions after post-processing, with its evaluation points and the original expression """ logger.info(79 * "*") logger.info("Compiler stage 1: Analyzing UFL objects") logger.info(79 * "*") elements: list[ufl.AbstractFiniteElement] = [] coordinate_elements: list[ufl.AbstractFiniteElement] = [] # Group objects by types forms: list[ufl.form.Form] = [] expressions: list[tuple[ufl.core.expr.Expr, npt.NDArray[np.floating]]] = [] processed_expressions: list[ tuple[ufl.core.expr.Expr, npt.NDArray[np.floating], ufl.core.expr.Expr] ] = [] for ufl_object in ufl_objects: if isinstance(ufl_object, ufl.form.Form): forms.append(ufl_object) elif isinstance(ufl_object, ufl.AbstractFiniteElement): elements.append(ufl_object) elif isinstance(ufl_object, ufl.Mesh): coordinate_elements.append(ufl_object.ufl_coordinate_element()) elif isinstance(ufl_object[0], ufl.core.expr.Expr): original_expression = ufl_object[0] points = np.asarray(ufl_object[1]) expressions.append((original_expression, points)) else: raise TypeError("UFL objects not recognised.") form_data = tuple(_analyze_form(form, scalar_type) for form in forms) for data in form_data: elements += data.unique_sub_elements coordinate_elements += data.coordinate_elements for original_expression, points in expressions: elements += ufl.algorithms.extract_elements(original_expression) processed_expression = _analyze_expression(original_expression, scalar_type) processed_expressions += [(processed_expression, points, original_expression)] elements += ufl.algorithms.analysis.extract_sub_elements(elements) # Sort elements so sub-elements come before mixed elements unique_elements = ufl.algorithms.sort_elements(set(elements)) unique_coordinate_element_list = sorted(set(coordinate_elements), key=lambda x: repr(x)) for e in unique_elements: assert isinstance(e, basix.ufl._ElementBase) # Compute dict (map) from element to index element_numbers = {element: i for i, element in enumerate(unique_elements)} return UFLData( form_data=form_data, unique_elements=unique_elements, element_numbers=element_numbers, unique_coordinate_elements=unique_coordinate_element_list, expressions=processed_expressions, ) def _analyze_expression( expression: ufl.core.expr.Expr, scalar_type: npt.DTypeLike ) -> ufl.core.expr.Expr: """Analyzes and preprocesses expressions.""" preserve_geometry_types = (ufl.classes.Jacobian,) expression = ufl.algorithms.apply_algebra_lowering.apply_algebra_lowering(expression) expression = 
ufl.algorithms.apply_derivatives.apply_derivatives(expression) expression = ufl.algorithms.apply_function_pullbacks.apply_function_pullbacks(expression) expression = ufl.algorithms.apply_geometry_lowering.apply_geometry_lowering( expression, preserve_geometry_types ) expression = ufl.algorithms.apply_derivatives.apply_derivatives(expression) expression = ufl.algorithms.apply_geometry_lowering.apply_geometry_lowering( expression, preserve_geometry_types ) expression = ufl.algorithms.apply_derivatives.apply_derivatives(expression) # Remove complex nodes if scalar type is real valued if not np.issubdtype(scalar_type, np.complexfloating): expression = ufl.algorithms.remove_complex_nodes.remove_complex_nodes(expression) return expression def _analyze_form( form: ufl.form.Form, scalar_type: npt.DTypeLike ) -> ufl.algorithms.formdata.FormData: """Analyzes UFL form and attaches metadata. Args: form: forms scalar_type: Scalar type used for form. This is used to simplify real valued forms Returns: Form data computed by UFL with metadata attached Note: The main workload of this function is extraction of unique/default metadata from options, integral metadata or inherited from UFL (in case of quadrature degree). """ if form.empty(): raise RuntimeError(f"Form ({form}) seems to be zero: cannot compile it.") if _has_custom_integrals(form): raise RuntimeError(f"Form ({form}) contains unsupported custom integrals.") # Check that coordinate element is based on basix.ufl._ElementBase for i in form._integrals: assert isinstance(i._ufl_domain._ufl_coordinate_element, basix.ufl._ElementBase) # Check for complex mode complex_mode = np.issubdtype(scalar_type, np.complexfloating) # Compute form metadata form_data = ufl.algorithms.compute_form_data( form, do_apply_function_pullbacks=True, do_apply_integral_scaling=True, do_apply_geometry_lowering=True, preserve_geometry_types=(ufl.classes.Jacobian,), do_apply_restrictions=True, do_append_everywhere_integrals=False, # do not add dx integrals to dx(i) in UFL complex_mode=complex_mode, ) # Determine unique quadrature degree and quadrature scheme # per each integral data for id, integral_data in enumerate(form_data.integral_data): # Iterate through groups of integral data. There is one integral # data for all integrals with same domain, itype, subdomain_id # (but possibly different metadata). # # Quadrature degree and quadrature scheme must be the same for # all integrals in this integral data group, i.e. must be the # same for for the same (domain, itype, subdomain_id) for i, integral in enumerate(integral_data.integrals): metadata = integral.metadata() # If form contains a quadrature element, use the custom quadrature scheme custom_q = None for e in ufl.algorithms.extract_elements(integral): if e.has_custom_quadrature: if custom_q is None: custom_q = e.custom_quadrature() else: p, w = e.custom_quadrature() assert np.allclose(p, custom_q[0]) assert np.allclose(w, custom_q[1]) if custom_q is None: # Extract quadrature degree qd = -1 if "quadrature_degree" in metadata.keys(): qd = metadata["quadrature_degree"] # Sending in a negative quadrature degree means that we want to be # able to customize it at a later stage. 
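                # For example, the MetaData demo earlier in this archive
                # builds an integral with dx(1, degree=-1): for such
                # integrals the branch below discards the negative value
                # and falls back to UFL's estimated polynomial degree.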
if qd < 0: qd = np.max(integral.metadata()["estimated_polynomial_degree"]) # Extract quadrature rule qr = integral.metadata().get("quadrature_rule", "default") logger.info(f"Integral {i}, integral group {id}:") logger.info(f"--- quadrature rule: {qr}") logger.info(f"--- quadrature degree: {qd}") metadata.update({"quadrature_degree": qd, "quadrature_rule": qr}) else: metadata.update( { "quadrature_points": custom_q[0], "quadrature_weights": custom_q[1], "quadrature_rule": "custom", } ) integral_data.integrals[i] = integral.reconstruct(metadata=metadata) return form_data def _has_custom_integrals( o: ufl.integral.Integral | ufl.classes.Form | list | tuple, ) -> bool: """Check for custom integrals.""" if isinstance(o, ufl.integral.Integral): return o.integral_type() in ufl.custom_integral_types elif isinstance(o, ufl.classes.Form): return any(_has_custom_integrals(itg) for itg in o.integrals()) elif isinstance(o, (list, tuple)): return any(_has_custom_integrals(itg) for itg in o) else: raise NotImplementedError ffcx-0.9.0/ffcx/codegeneration/000077500000000000000000000000001470142666300163735ustar00rootroot00000000000000ffcx-0.9.0/ffcx/codegeneration/C/000077500000000000000000000000001470142666300165555ustar00rootroot00000000000000ffcx-0.9.0/ffcx/codegeneration/C/__init__.py000066400000000000000000000000341470142666300206630ustar00rootroot00000000000000"""Generation of C code.""" ffcx-0.9.0/ffcx/codegeneration/C/c_implementation.py000066400000000000000000000274001470142666300224610ustar00rootroot00000000000000# Copyright (C) 2023 Chris Richardson # # This file is part of FFCx. (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """C implementation.""" import warnings import numpy as np import numpy.typing as npt import ffcx.codegeneration.lnodes as L from ffcx.codegeneration.utils import dtype_to_c_type, dtype_to_scalar_dtype math_table = { "float64": { "sqrt": "sqrt", "abs": "fabs", "cos": "cos", "sin": "sin", "tan": "tan", "acos": "acos", "asin": "asin", "atan": "atan", "cosh": "cosh", "sinh": "sinh", "tanh": "tanh", "acosh": "acosh", "asinh": "asinh", "atanh": "atanh", "power": "pow", "exp": "exp", "ln": "log", "erf": "erf", "atan_2": "atan2", "min_value": "fmin", "max_value": "fmax", "bessel_y": "yn", "bessel_j": "jn", }, "float32": { "sqrt": "sqrtf", "abs": "fabsf", "cos": "cosf", "sin": "sinf", "tan": "tanf", "acos": "acosf", "asin": "asinf", "atan": "atanf", "cosh": "coshf", "sinh": "sinhf", "tanh": "tanhf", "acosh": "acoshf", "asinh": "asinhf", "atanh": "atanhf", "power": "powf", "exp": "expf", "ln": "logf", "erf": "erff", "atan_2": "atan2f", "min_value": "fminf", "max_value": "fmaxf", "bessel_y": "yn", "bessel_j": "jn", }, "longdouble": { "sqrt": "sqrtl", "abs": "fabsl", "cos": "cosl", "sin": "sinl", "tan": "tanl", "acos": "acosl", "asin": "asinl", "atan": "atanl", "cosh": "coshl", "sinh": "sinhl", "tanh": "tanhl", "acosh": "acoshl", "asinh": "asinhl", "atanh": "atanhl", "power": "powl", "exp": "expl", "ln": "logl", "erf": "erfl", "atan_2": "atan2l", "min_value": "fminl", "max_value": "fmaxl", }, "complex128": { "sqrt": "csqrt", "abs": "cabs", "cos": "ccos", "sin": "csin", "tan": "ctan", "acos": "cacos", "asin": "casin", "atan": "catan", "cosh": "ccosh", "sinh": "csinh", "tanh": "ctanh", "acosh": "cacosh", "asinh": "casinh", "atanh": "catanh", "power": "cpow", "exp": "cexp", "ln": "clog", "real": "creal", "imag": "cimag", "conj": "conj", "max_value": "fmax", "min_value": "fmin", "bessel_y": "yn", "bessel_j": "jn", }, "complex64": { "sqrt": "csqrtf", "abs": 
"cabsf", "cos": "ccosf", "sin": "csinf", "tan": "ctanf", "acos": "cacosf", "asin": "casinf", "atan": "catanf", "cosh": "ccoshf", "sinh": "csinhf", "tanh": "ctanhf", "acosh": "cacoshf", "asinh": "casinhf", "atanh": "catanhf", "power": "cpowf", "exp": "cexpf", "ln": "clogf", "real": "crealf", "imag": "cimagf", "conj": "conjf", "max_value": "fmaxf", "min_value": "fminf", "bessel_y": "yn", "bessel_j": "jn", }, } class CFormatter: """C formatter.""" scalar_type: np.dtype real_type: np.dtype def __init__(self, dtype: npt.DTypeLike) -> None: """Initialise.""" self.scalar_type = np.dtype(dtype) self.real_type = dtype_to_scalar_dtype(dtype) def _dtype_to_name(self, dtype) -> str: """Convert dtype to C name.""" if dtype == L.DataType.SCALAR: return dtype_to_c_type(self.scalar_type) if dtype == L.DataType.REAL: return dtype_to_c_type(self.real_type) if dtype == L.DataType.INT: return "int" if dtype == L.DataType.BOOL: return "bool" raise ValueError(f"Invalid dtype: {dtype}") def _format_number(self, x): """Format a number.""" # Use 16sf for precision (good for float64 or less) if isinstance(x, complex): return f"({x.real:.16}+I*{x.imag:.16})" elif isinstance(x, float): return f"{x:.16}" return str(x) def _build_initializer_lists(self, values): """Build initializer lists.""" arr = "{" if len(values.shape) == 1: arr += ", ".join(self._format_number(v) for v in values) elif len(values.shape) > 1: arr += ",\n ".join(self._build_initializer_lists(v) for v in values) arr += "}" return arr def format_statement_list(self, slist) -> str: """Format a statement list.""" return "".join(self.c_format(s) for s in slist.statements) def format_section(self, section) -> str: """Format a section.""" # add new line before section comments = "// ------------------------ \n" comments += "// Section: " + section.name + "\n" comments += "// Inputs: " + ", ".join(w.name for w in section.input) + "\n" comments += "// Outputs: " + ", ".join(w.name for w in section.output) + "\n" declarations = "".join(self.c_format(s) for s in section.declarations) body = "" if len(section.statements) > 0: declarations += "{\n " body = "".join(self.c_format(s) for s in section.statements) body = body.replace("\n", "\n ") body = body[:-2] + "}\n" body += "// ------------------------ \n" return comments + declarations + body def format_comment(self, c) -> str: """Format a comment.""" return "// " + c.comment + "\n" def format_array_decl(self, arr) -> str: """Format an array declaration.""" dtype = arr.symbol.dtype typename = self._dtype_to_name(dtype) symbol = self.c_format(arr.symbol) dims = "".join([f"[{i}]" for i in arr.sizes]) if arr.values is None: assert arr.const is False return f"{typename} {symbol}{dims};\n" vals = self._build_initializer_lists(arr.values) cstr = "static const " if arr.const else "" return f"{cstr}{typename} {symbol}{dims} = {vals};\n" def format_array_access(self, arr) -> str: """Format an array access.""" name = self.c_format(arr.array) indices = f"[{']['.join(self.c_format(i) for i in arr.indices)}]" return f"{name}{indices}" def format_variable_decl(self, v) -> str: """Format a variable declaration.""" val = self.c_format(v.value) symbol = self.c_format(v.symbol) typename = self._dtype_to_name(v.symbol.dtype) return f"{typename} {symbol} = {val};\n" def format_nary_op(self, oper) -> str: """Format an n-ary operation.""" # Format children args = [self.c_format(arg) for arg in oper.args] # Apply parentheses for i in range(len(args)): if oper.args[i].precedence >= oper.precedence: args[i] = "(" + args[i] + ")" # 
Return combined string return f" {oper.op} ".join(args) def format_binary_op(self, oper) -> str: """Format a binary operation.""" # Format children lhs = self.c_format(oper.lhs) rhs = self.c_format(oper.rhs) # Apply parentheses if oper.lhs.precedence >= oper.precedence: lhs = f"({lhs})" if oper.rhs.precedence >= oper.precedence: rhs = f"({rhs})" # Return combined string return f"{lhs} {oper.op} {rhs}" def format_unary_op(self, oper) -> str: """Format a unary operation.""" arg = self.c_format(oper.arg) if oper.arg.precedence >= oper.precedence: return f"{oper.op}({arg})" return f"{oper.op}{arg}" def format_literal_float(self, val) -> str: """Format a literal float.""" value = self._format_number(val.value) return f"{value}" def format_literal_int(self, val) -> str: """Format a literal int.""" return f"{val.value}" def format_for_range(self, r) -> str: """Format a for loop over a range.""" begin = self.c_format(r.begin) end = self.c_format(r.end) index = self.c_format(r.index) output = f"for (int {index} = {begin}; {index} < {end}; ++{index})\n" output += "{\n" body = self.c_format(r.body) for line in body.split("\n"): if len(line) > 0: output += f" {line}\n" output += "}\n" return output def format_statement(self, s) -> str: """Format a statement.""" return self.c_format(s.expr) def format_assign(self, expr) -> str: """Format an assignment.""" rhs = self.c_format(expr.rhs) lhs = self.c_format(expr.lhs) return f"{lhs} {expr.op} {rhs};\n" def format_conditional(self, s) -> str: """Format a conditional.""" # Format children c = self.c_format(s.condition) t = self.c_format(s.true) f = self.c_format(s.false) # Apply parentheses if s.condition.precedence >= s.precedence: c = "(" + c + ")" if s.true.precedence >= s.precedence: t = "(" + t + ")" if s.false.precedence >= s.precedence: f = "(" + f + ")" # Return combined string return c + " ? 
" + t + " : " + f def format_symbol(self, s) -> str: """Format a symbol.""" return f"{s.name}" def format_multi_index(self, mi) -> str: """Format a multi-index.""" return self.c_format(mi.global_index) def format_math_function(self, c) -> str: """Format a mathematical function.""" # Get a table of functions for this type, if available arg_type = self.scalar_type if hasattr(c.args[0], "dtype"): if c.args[0].dtype == L.DataType.REAL: arg_type = self.real_type else: warnings.warn(f"Syntax item without dtype {c.args[0]}") dtype_math_table = math_table[arg_type.name] # Get a function from the table, if available, else just use bare name func = dtype_math_table.get(c.function, c.function) args = ", ".join(self.c_format(arg) for arg in c.args) return f"{func}({args})" c_impl = { "Section": format_section, "StatementList": format_statement_list, "Comment": format_comment, "ArrayDecl": format_array_decl, "ArrayAccess": format_array_access, "MultiIndex": format_multi_index, "VariableDecl": format_variable_decl, "ForRange": format_for_range, "Statement": format_statement, "Assign": format_assign, "AssignAdd": format_assign, "Product": format_nary_op, "Neg": format_unary_op, "Sum": format_nary_op, "Add": format_binary_op, "Sub": format_binary_op, "Mul": format_binary_op, "Div": format_binary_op, "Not": format_unary_op, "LiteralFloat": format_literal_float, "LiteralInt": format_literal_int, "Symbol": format_symbol, "Conditional": format_conditional, "MathFunction": format_math_function, "And": format_binary_op, "Or": format_binary_op, "NE": format_binary_op, "EQ": format_binary_op, "GE": format_binary_op, "LE": format_binary_op, "GT": format_binary_op, "LT": format_binary_op, } def c_format(self, s) -> str: """Format as C.""" name = s.__class__.__name__ try: return self.c_impl[name](self, s) except KeyError: raise RuntimeError("Unknown statement: ", name) ffcx-0.9.0/ffcx/codegeneration/C/expressions.py000066400000000000000000000107021470142666300215110ustar00rootroot00000000000000# Copyright (C) 2019 Michal Habera # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Generate UFC code for an expression.""" from __future__ import annotations import logging import numpy as np from ffcx.codegeneration.backend import FFCXBackend from ffcx.codegeneration.C import expressions_template from ffcx.codegeneration.C.c_implementation import CFormatter from ffcx.codegeneration.expression_generator import ExpressionGenerator from ffcx.codegeneration.utils import dtype_to_c_type, dtype_to_scalar_dtype from ffcx.ir.representation import ExpressionIR logger = logging.getLogger("ffcx") def generator(ir: ExpressionIR, options): """Generate UFC code for an expression.""" logger.info("Generating code for expression:") assert len(ir.expression.integrand) == 1, "Expressions only support single quadrature rule" points = next(iter(ir.expression.integrand)).points logger.info(f"--- points: {points}") factory_name = ir.expression.name logger.info(f"--- name: {factory_name}") # Format declaration declaration = expressions_template.declaration.format( factory_name=factory_name, name_from_uflfile=ir.name_from_uflfile ) backend = FFCXBackend(ir, options) eg = ExpressionGenerator(ir, backend) d: dict[str, str | int] = {} d["name_from_uflfile"] = ir.name_from_uflfile d["factory_name"] = factory_name parts = eg.generate() CF = CFormatter(options["scalar_type"]) d["tabulate_expression"] = CF.c_format(parts) if len(ir.original_coefficient_positions) > 0: 
d["original_coefficient_positions"] = f"original_coefficient_positions_{factory_name}" values = ", ".join(str(i) for i in ir.original_coefficient_positions) sizes = len(ir.original_coefficient_positions) d["original_coefficient_positions_init"] = ( f"static int original_coefficient_positions_{factory_name}[{sizes}] = {{{values}}};" ) else: d["original_coefficient_positions"] = "NULL" d["original_coefficient_positions_init"] = "" values = ", ".join(str(p) for p in points.flatten()) sizes = points.size d["points_init"] = f"static double points_{factory_name}[{sizes}] = {{{values}}};" d["points"] = f"points_{factory_name}" if len(ir.expression.shape) > 0: values = ", ".join(str(i) for i in ir.expression.shape) sizes = len(ir.expression.shape) d["value_shape_init"] = f"static int value_shape_{factory_name}[{sizes}] = {{{values}}};" d["value_shape"] = f"value_shape_{factory_name}" else: d["value_shape_init"] = "" d["value_shape"] = "NULL" d["num_components"] = len(ir.expression.shape) d["num_coefficients"] = len(ir.expression.coefficient_numbering) d["num_constants"] = len(ir.constant_names) d["num_points"] = points.shape[0] d["entity_dimension"] = points.shape[1] d["scalar_type"] = dtype_to_c_type(options["scalar_type"]) d["geom_type"] = dtype_to_c_type(dtype_to_scalar_dtype(options["scalar_type"])) d["np_scalar_type"] = np.dtype(options["scalar_type"]).name d["rank"] = len(ir.expression.tensor_shape) if len(ir.coefficient_names) > 0: values = ", ".join(f'"{name}"' for name in ir.coefficient_names) sizes = len(ir.coefficient_names) d["coefficient_names_init"] = ( f"static const char* coefficient_names_{factory_name}[{sizes}] = {{{values}}};" ) d["coefficient_names"] = f"coefficient_names_{factory_name}" else: d["coefficient_names_init"] = "" d["coefficient_names"] = "NULL" if len(ir.constant_names) > 0: values = ", ".join(f'"{name}"' for name in ir.constant_names) sizes = len(ir.constant_names) d["constant_names_init"] = ( f"static const char* constant_names_{factory_name}[{sizes}] = {{{values}}};" ) d["constant_names"] = f"constant_names_{factory_name}" else: d["constant_names_init"] = "" d["constant_names"] = "NULL" # Check that no keys are redundant or have been missed from string import Formatter fields = [fname for _, fname, _, _ in Formatter().parse(expressions_template.factory) if fname] assert set(fields) == set(d.keys()), "Mismatch between keys in template and in formatting dict" # Format implementation code implementation = expressions_template.factory.format_map(d) return declaration, implementation ffcx-0.9.0/ffcx/codegeneration/C/expressions_template.py000066400000000000000000000033131470142666300234040ustar00rootroot00000000000000# Copyright (C) 2019 Michal Habera # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Code generation strings for an expression.""" declaration = """ extern ufcx_expression {factory_name}; // Helper used to create expression using name which was given to the // expression in the UFL file. // This helper is called in user c++ code. 
// extern ufcx_expression* {name_from_uflfile}; """ factory = """ // Code for expression {factory_name} void tabulate_tensor_{factory_name}({scalar_type}* restrict A, const {scalar_type}* restrict w, const {scalar_type}* restrict c, const {geom_type}* restrict coordinate_dofs, const int* restrict entity_local_index, const uint8_t* restrict quadrature_permutation) {{ {tabulate_expression} }} {points_init} {value_shape_init} {original_coefficient_positions_init} {coefficient_names_init} {constant_names_init} ufcx_expression {factory_name} = {{ .tabulate_tensor_{np_scalar_type} = tabulate_tensor_{factory_name}, .num_coefficients = {num_coefficients}, .num_constants = {num_constants}, .original_coefficient_positions = {original_coefficient_positions}, .coefficient_names = {coefficient_names}, .constant_names = {constant_names}, .num_points = {num_points}, .entity_dimension = {entity_dimension}, .points = {points}, .value_shape = {value_shape}, .num_components = {num_components}, .rank = {rank}, }}; // Alias name ufcx_expression* {name_from_uflfile} = &{factory_name}; // End of code for expression {factory_name} """ ffcx-0.9.0/ffcx/codegeneration/C/file.py000066400000000000000000000026261470142666300200540ustar00rootroot00000000000000# Copyright (C) 2009-2018 Anders Logg, Martin Sandve Alnæs and Garth N. Wells # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later # # Note: Most of the code in this file is a direct translation from the # old implementation in FFC """Generate a file.""" import logging import pprint import textwrap import numpy as np from ffcx import __version__ as FFCX_VERSION from ffcx.codegeneration import __version__ as UFC_VERSION from ffcx.codegeneration.C import file_template logger = logging.getLogger("ffcx") def generator(options): """Generate UFC code for file output.""" logger.info("Generating code for file") # Attributes d = {"ffcx_version": FFCX_VERSION, "ufcx_version": UFC_VERSION} d["options"] = textwrap.indent(pprint.pformat(options), "// ") extra_c_includes = [] if np.issubdtype(options["scalar_type"], np.complexfloating): extra_c_includes += ["complex.h"] d["extra_c_includes"] = "\n".join(f"#include <{header}>" for header in extra_c_includes) # Format declaration code code_pre = ( file_template.declaration_pre.format_map(d), file_template.implementation_pre.format_map(d), ) # Format implementation code code_post = ( file_template.declaration_post.format_map(d), file_template.implementation_post.format_map(d), ) return code_pre, code_post ffcx-0.9.0/ffcx/codegeneration/C/file_template.py000066400000000000000000000021041470142666300217360ustar00rootroot00000000000000# Code generation format strings for UFC (Unified Form-assembly Code) # This code is released into the public domain. # # The FEniCS Project (http://www.fenicsproject.org/) 2018. """Code generation strings for a file.""" import sys declaration_pre = """ // This code conforms with the UFC specification version {ufcx_version} // and was automatically generated by FFCx version {ffcx_version}. // // This code was generated with the following options: // {options} #pragma once #include #ifdef __cplusplus extern "C" {{ #endif """ declaration_post = """ #ifdef __cplusplus }} #endif """ implementation_pre = """ // This code conforms with the UFC specification version {ufcx_version} // and was automatically generated by FFCx version {ffcx_version}. 
// // This code was generated with the following options: // {options} #include #include #include #include #include {extra_c_includes} """ if sys.platform.startswith("win32"): libraries: list[str] = [] else: libraries: list[str] = ["m"] implementation_post = "" ffcx-0.9.0/ffcx/codegeneration/C/form.py000066400000000000000000000120071470142666300200720ustar00rootroot00000000000000# Copyright (C) 2009-2017 Anders Logg and Martin Sandve Alnæs # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later # # Modified by Chris Richardson and Jørgen S. Dokken 2023 # # Note: Most of the code in this file is a direct translation from the # old implementation in FFC """Generate UFC code for a form.""" from __future__ import annotations import logging import numpy as np from ffcx.codegeneration.C import form_template from ffcx.ir.representation import FormIR logger = logging.getLogger("ffcx") def generator(ir: FormIR, options): """Generate UFC code for a form.""" logger.info("Generating code for form:") logger.info(f"--- rank: {ir.rank}") logger.info(f"--- name: {ir.name}") d: dict[str, int | str] = {} d["factory_name"] = ir.name d["name_from_uflfile"] = ir.name_from_uflfile d["signature"] = f'"{ir.signature}"' d["rank"] = ir.rank d["num_coefficients"] = ir.num_coefficients d["num_constants"] = ir.num_constants if len(ir.original_coefficient_positions) > 0: values = ", ".join(str(i) for i in ir.original_coefficient_positions) sizes = len(ir.original_coefficient_positions) d["original_coefficient_position_init"] = ( f"int original_coefficient_position_{ir.name}[{sizes}] = {{{values}}};" ) d["original_coefficient_positions"] = f"original_coefficient_position_{ir.name}" else: d["original_coefficient_position_init"] = "" d["original_coefficient_positions"] = "NULL" if len(ir.coefficient_names) > 0: values = ", ".join(f'"{name}"' for name in ir.coefficient_names) sizes = len(ir.coefficient_names) d["coefficient_names_init"] = ( f"static const char* coefficient_names_{ir.name}[{sizes}] = {{{values}}};" ) d["coefficient_names"] = f"coefficient_names_{ir.name}" else: d["coefficient_names_init"] = "" d["coefficient_names"] = "NULL" if len(ir.constant_names) > 0: values = ", ".join(f'"{name}"' for name in ir.constant_names) sizes = len(ir.constant_names) d["constant_names_init"] = ( f"static const char* constant_names_{ir.name}[{sizes}] = {{{values}}};" ) d["constant_names"] = f"constant_names_{ir.name}" else: d["constant_names_init"] = "" d["constant_names"] = "NULL" if len(ir.finite_element_hashes) > 0: d["finite_element_hashes"] = f"finite_element_hashes_{ir.name}" values = ", ".join( f"UINT64_C({0 if el is None else el})" for el in ir.finite_element_hashes ) sizes = len(ir.finite_element_hashes) d["finite_element_hashes_init"] = ( f"uint64_t finite_element_hashes_{ir.name}[{sizes}] = {{{values}}};" ) else: d["finite_element_hashes"] = "NULL" d["finite_element_hashes_init"] = "" integrals = [] integral_ids = [] integral_offsets = [0] # Note: the order of this list is defined by the enum ufcx_integral_type in ufcx.h for itg_type in ("cell", "exterior_facet", "interior_facet"): unsorted_integrals = [] unsorted_ids = [] for name, id in zip(ir.integral_names[itg_type], ir.subdomain_ids[itg_type]): unsorted_integrals += [f"&{name}"] unsorted_ids += [id] id_sort = np.argsort(unsorted_ids) integrals += [unsorted_integrals[i] for i in id_sort] integral_ids += [unsorted_ids[i] for i in id_sort] integral_offsets.append(len(integrals)) if len(integrals) > 0: sizes = 
        sizes = len(integrals)
        values = ", ".join(integrals)
        d["form_integrals_init"] = (
            f"static ufcx_integral* form_integrals_{ir.name}[{sizes}] = {{{values}}};"
        )
        d["form_integrals"] = f"form_integrals_{ir.name}"
        sizes = len(integral_ids)
        values = ", ".join(str(i) for i in integral_ids)
        d["form_integral_ids_init"] = f"int form_integral_ids_{ir.name}[{sizes}] = {{{values}}};"
        d["form_integral_ids"] = f"form_integral_ids_{ir.name}"
    else:
        d["form_integrals_init"] = ""
        d["form_integrals"] = "NULL"
        d["form_integral_ids_init"] = ""
        d["form_integral_ids"] = "NULL"

    sizes = len(integral_offsets)
    values = ", ".join(str(i) for i in integral_offsets)
    d["form_integral_offsets_init"] = (
        f"int form_integral_offsets_{ir.name}[{sizes}] = {{{values}}};"
    )

    # Check that no keys are redundant or have been missed
    from string import Formatter

    fields = [fname for _, fname, _, _ in Formatter().parse(form_template.factory) if fname]
    assert set(fields) == set(d.keys()), "Mismatch between keys in template and in formatting dict"

    # Format implementation code
    implementation = form_template.factory.format_map(d)

    # Format declaration
    declaration = form_template.declaration.format(
        factory_name=d["factory_name"], name_from_uflfile=d["name_from_uflfile"]
    )

    return declaration, implementation
ffcx-0.9.0/ffcx/codegeneration/C/form_template.py000066400000000000000000000024471470142666300217740ustar00rootroot00000000000000# Code generation format strings for UFC (Unified Form-assembly Code)
# This code is released into the public domain.
#
# The FEniCS Project (http://www.fenicsproject.org/) 2020.
"""Code generation strings for a form."""

declaration = """
extern ufcx_form {factory_name};

// Helper used to create a form using the name that was given to it
// in the UFL file. This helper is called in user C++ code.
//
extern ufcx_form* {name_from_uflfile};
"""

factory = """
// Code for form {factory_name}

{original_coefficient_position_init}
{finite_element_hashes_init}
{form_integral_offsets_init}
{form_integrals_init}
{form_integral_ids_init}
{coefficient_names_init}
{constant_names_init}

ufcx_form {factory_name} =
{{
  .signature = {signature},
  .rank = {rank},
  .num_coefficients = {num_coefficients},
  .num_constants = {num_constants},
  .original_coefficient_positions = {original_coefficient_positions},
  .coefficient_name_map = {coefficient_names},
  .constant_name_map = {constant_names},
  .finite_element_hashes = {finite_element_hashes},
  .form_integrals = {form_integrals},
  .form_integral_ids = {form_integral_ids},
  .form_integral_offsets = form_integral_offsets_{factory_name}
}};

// Alias name
ufcx_form* {name_from_uflfile} = &{factory_name};

// End of code for form {factory_name}
"""
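# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the FFCx sources): the `declaration` and
# `factory` strings above are plain Python format strings that form.py fills
# in with str.format()/format_map(). A toy analogue, with invented field
# names, shows the mechanics, including why the C braces are doubled:


def _template_sketch() -> str:
    toy_factory = """
// Code for form {factory_name}
ufcx_form {factory_name} = {{
  .rank = {rank},
}};
"""
    # {{ and }} survive formatting as the literal C braces "{" and "}".
    return toy_factory.format_map({"factory_name": "form_demo", "rank": 2})


# ---------------------------------------------------------------------------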
ffcx-0.9.0/ffcx/codegeneration/C/integrals.py000066400000000000000000000071421470142666300211230ustar00rootroot00000000000000# Copyright (C) 2015-2021 Martin Sandve Alnæs, Michal Habera, Igor Baratta
#
# This file is part of FFCx. (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
"""Generate UFC code for an integral."""

import logging
import sys

import numpy as np

from ffcx.codegeneration.backend import FFCXBackend
from ffcx.codegeneration.C import integrals_template as ufcx_integrals
from ffcx.codegeneration.C.c_implementation import CFormatter
from ffcx.codegeneration.integral_generator import IntegralGenerator
from ffcx.codegeneration.utils import dtype_to_c_type, dtype_to_scalar_dtype
from ffcx.ir.representation import IntegralIR

logger = logging.getLogger("ffcx")


def generator(ir: IntegralIR, options):
    """Generate C code for an integral."""
    logger.info("Generating code for integral:")
    logger.info(f"--- type: {ir.expression.integral_type}")
    logger.info(f"--- name: {ir.expression.name}")

    factory_name = ir.expression.name

    # Format declaration
    declaration = ufcx_integrals.declaration.format(factory_name=factory_name)

    # Create FFCx C backend
    backend = FFCXBackend(ir, options)

    # Configure kernel generator
    ig = IntegralGenerator(ir, backend)

    # Generate code ast for the tabulate_tensor body
    parts = ig.generate()

    # Format code as string
    CF = CFormatter(options["scalar_type"])
    body = CF.c_format(parts)

    # Generate generic FFCx code snippets and add specific parts
    code = {}

    if len(ir.enabled_coefficients) > 0:
        values = ", ".join("1" if i else "0" for i in ir.enabled_coefficients)
        sizes = len(ir.enabled_coefficients)
        code["enabled_coefficients_init"] = (
            f"bool enabled_coefficients_{ir.expression.name}[{sizes}] = {{{values}}};"
        )
        code["enabled_coefficients"] = f"enabled_coefficients_{ir.expression.name}"
    else:
        code["enabled_coefficients_init"] = ""
        code["enabled_coefficients"] = "NULL"

    code["tabulate_tensor"] = body
    code["tabulate_tensor_float32"] = ".tabulate_tensor_float32 = NULL,"
    code["tabulate_tensor_float64"] = ".tabulate_tensor_float64 = NULL,"
    if sys.platform.startswith("win32"):
        code["tabulate_tensor_complex64"] = ""
        code["tabulate_tensor_complex128"] = ""
    else:
        code["tabulate_tensor_complex64"] = ".tabulate_tensor_complex64 = NULL,"
        code["tabulate_tensor_complex128"] = ".tabulate_tensor_complex128 = NULL,"
    np_scalar_type = np.dtype(options["scalar_type"]).name
    code[f"tabulate_tensor_{np_scalar_type}"] = (
        f".tabulate_tensor_{np_scalar_type} = tabulate_tensor_{factory_name},"
    )

    element_hash = 0 if ir.coordinate_element_hash is None else ir.coordinate_element_hash

    implementation = ufcx_integrals.factory.format(
        factory_name=factory_name,
        enabled_coefficients=code["enabled_coefficients"],
        enabled_coefficients_init=code["enabled_coefficients_init"],
        tabulate_tensor=code["tabulate_tensor"],
        needs_facet_permutations="true" if ir.expression.needs_facet_permutations else "false",
        scalar_type=dtype_to_c_type(options["scalar_type"]),
        geom_type=dtype_to_c_type(dtype_to_scalar_dtype(options["scalar_type"])),
        coordinate_element_hash=f"UINT64_C({element_hash})",
        tabulate_tensor_float32=code["tabulate_tensor_float32"],
        tabulate_tensor_float64=code["tabulate_tensor_float64"],
        tabulate_tensor_complex64=code["tabulate_tensor_complex64"],
        tabulate_tensor_complex128=code["tabulate_tensor_complex128"],
    )

    return declaration, implementation
ffcx-0.9.0/ffcx/codegeneration/C/integrals_template.py000066400000000000000000000022601470142666300230120ustar00rootroot00000000000000# Code generation format strings for UFC (Unified Form-assembly Code)
# This code is released into the public domain.
# # The FEniCS Project (http://www.fenicsproject.org/) 2018 """Code generation strings for an integral.""" declaration = """ extern ufcx_integral {factory_name}; """ factory = """ // Code for integral {factory_name} void tabulate_tensor_{factory_name}({scalar_type}* restrict A, const {scalar_type}* restrict w, const {scalar_type}* restrict c, const {geom_type}* restrict coordinate_dofs, const int* restrict entity_local_index, const uint8_t* restrict quadrature_permutation) {{ {tabulate_tensor} }} {enabled_coefficients_init} ufcx_integral {factory_name} = {{ .enabled_coefficients = {enabled_coefficients}, {tabulate_tensor_float32} {tabulate_tensor_float64} {tabulate_tensor_complex64} {tabulate_tensor_complex128} .needs_facet_permutations = {needs_facet_permutations}, .coordinate_element_hash = {coordinate_element_hash}, }}; // End of code for integral {factory_name} """ ffcx-0.9.0/ffcx/codegeneration/__init__.py000066400000000000000000000015761470142666300205150ustar00rootroot00000000000000"""FFCx code generation.""" import hashlib import os # Version of FFCx header files __author__ = "FEniCS Project" __license__ = "This code is released into the public domain" __version__ = "2018.2.0.dev0" # Get abspath on import, it can in some cases be a relative path w.r.t. # curdir on startup _include_path = os.path.dirname(os.path.abspath(__file__)) def get_include_path(): """Return location of UFCx header files.""" return _include_path def _compute_signature(): """Compute signature of UFCx header files.""" h = hashlib.sha1() with open(os.path.join(get_include_path(), "ufcx.h")) as f: h.update(f.read().encode("utf-8")) return h.hexdigest() _signature = _compute_signature() def get_signature(): """Return SHA-1 hash of the contents of ufcx.h. In this implementation, the value is computed on import. """ return _signature ffcx-0.9.0/ffcx/codegeneration/access.py000066400000000000000000000457401470142666300202200ustar00rootroot00000000000000# Copyright (C) 2011-2017 Martin Sandve Alnæs # # This file is part of FFCx. (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """FFCx/UFC specific variable access.""" import logging import warnings from typing import Optional import basix.ufl import ufl import ffcx.codegeneration.lnodes as L from ffcx.ir.analysis.modified_terminals import ModifiedTerminal from ffcx.ir.elementtables import UniqueTableReferenceT from ffcx.ir.representationutils import QuadratureRule logger = logging.getLogger("ffcx") class FFCXBackendAccess: """FFCx specific formatter class.""" def __init__(self, entity_type: str, integral_type: str, symbols, options): """Initialise.""" # Store ir and options self.entity_type = entity_type self.integral_type = integral_type self.symbols = symbols self.options = options # Lookup table for handler to call when the "get" method (below) is # called, depending on the first argument type. 
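# (Illustrative sketch, not part of the FFCx sources: the dict below and the
# parent-class fallback in `get` implement type-keyed dispatch. The class and
# handler names in this sketch are invented for illustration.)
def _dispatch_sketch():
    class Terminal:
        pass

    class Coefficient(Terminal):
        pass

    class SpecialCoefficient(Coefficient):
        pass

    handlers = {Coefficient: lambda e: "coefficient", Terminal: lambda e: "terminal"}

    def get(e):
        # Try an exact type match first, then fall back to parent classes,
        # mirroring FFCXBackendAccess.get below.
        handler = handlers.get(type(e))
        if not handler:
            for k in handlers:
                if isinstance(e, k):
                    handler = handlers[k]
                    break
        if handler is None:
            raise RuntimeError(f"Not handled: {type(e)}")
        return handler(e)

    assert get(SpecialCoefficient()) == "coefficient"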
self.call_lookup = { ufl.coefficient.Coefficient: self.coefficient, ufl.constant.Constant: self.constant, ufl.geometry.Jacobian: self.jacobian, ufl.geometry.CellCoordinate: self.cell_coordinate, ufl.geometry.FacetCoordinate: self.facet_coordinate, ufl.geometry.CellVertices: self.cell_vertices, ufl.geometry.FacetEdgeVectors: self.facet_edge_vectors, ufl.geometry.CellEdgeVectors: self.cell_edge_vectors, ufl.geometry.CellFacetJacobian: self.cell_facet_jacobian, ufl.geometry.ReferenceCellVolume: self.reference_cell_volume, ufl.geometry.ReferenceFacetVolume: self.reference_facet_volume, ufl.geometry.ReferenceCellEdgeVectors: self.reference_cell_edge_vectors, ufl.geometry.ReferenceFacetEdgeVectors: self.reference_facet_edge_vectors, ufl.geometry.ReferenceNormal: self.reference_normal, ufl.geometry.CellOrientation: self._pass, ufl.geometry.FacetOrientation: self.facet_orientation, ufl.geometry.SpatialCoordinate: self.spatial_coordinate, } def get( self, mt: ModifiedTerminal, tabledata: UniqueTableReferenceT, quadrature_rule: QuadratureRule, ): """Format a terminal.""" e = mt.terminal # Call appropriate handler, depending on the type of e handler = self.call_lookup.get(type(e), False) if not handler: # Look for parent class types instead for k in self.call_lookup.keys(): if isinstance(e, k): handler = self.call_lookup[k] break if handler: return handler(mt, tabledata, quadrature_rule) # type: ignore else: raise RuntimeError(f"Not handled: {type(e)}") def coefficient( self, mt: ModifiedTerminal, tabledata: UniqueTableReferenceT, quadrature_rule: QuadratureRule, ): """Access a coefficient.""" ttype = tabledata.ttype assert ttype != "zeros" num_dofs = tabledata.values.shape[3] begin = tabledata.offset end = begin + tabledata.block_size * (num_dofs - 1) + 1 if ttype == "ones" and (end - begin) == 1: # f = 1.0 * f_{begin}, just return direct reference to dof # array at dof begin (if mt is restricted, begin contains # cell offset) return self.symbols.coefficient_dof_access(mt.terminal, begin) else: # Return symbol, see definitions for computation return self.symbols.coefficient_value(mt) def constant( self, mt: ModifiedTerminal, tabledata: Optional[UniqueTableReferenceT], quadrature_rule: Optional[QuadratureRule], ): """Access a constant.""" # Access to a constant is handled trivially, directly through constants symbol return self.symbols.constant_index_access(mt.terminal, mt.flat_component) def spatial_coordinate( self, mt: ModifiedTerminal, tabledata: UniqueTableReferenceT, num_points: QuadratureRule ): """Access a spatial coordinate.""" if mt.global_derivatives: raise RuntimeError("Not expecting global derivatives of SpatialCoordinate.") if mt.averaged is not None: raise RuntimeError("Not expecting average of SpatialCoordinates.") if self.integral_type in ufl.custom_integral_types: if mt.local_derivatives: raise RuntimeError("FIXME: Jacobian in custom integrals is not implemented.") # Access predefined quadrature points table x = self.symbols.custom_points_table iq = self.symbols.quadrature_loop_index (gdim,) = mt.terminal.ufl_shape if gdim == 1: index = iq else: index = iq * gdim + mt.flat_component return x[index] elif self.integral_type == "expression": # Physical coordinates are computed by code generated in # definitions return self.symbols.x_component(mt) else: # Physical coordinates are computed by code generated in # definitions return self.symbols.x_component(mt) def cell_coordinate(self, mt, tabledata, num_points): """Access a cell coordinate.""" if mt.global_derivatives: raise 
RuntimeError("Not expecting derivatives of CellCoordinate.") if mt.local_derivatives: raise RuntimeError("Not expecting derivatives of CellCoordinate.") if mt.averaged is not None: raise RuntimeError("Not expecting average of CellCoordinate.") if self.integral_type == "cell" and not mt.restriction: # Access predefined quadrature points table X = self.symbols.points_table(num_points) (tdim,) = mt.terminal.ufl_shape iq = self.symbols.quadrature_loop_index() if num_points == 1: index = mt.flat_component elif tdim == 1: index = iq else: index = iq * tdim + mt.flat_component return X[index] else: # X should be computed from x or Xf symbolically instead of # getting here raise RuntimeError("Expecting reference cell coordinate to be symbolically rewritten.") def facet_coordinate(self, mt, tabledata, num_points): """Access a facet coordinate.""" if mt.global_derivatives: raise RuntimeError("Not expecting derivatives of FacetCoordinate.") if mt.local_derivatives: raise RuntimeError("Not expecting derivatives of FacetCoordinate.") if mt.averaged is not None: raise RuntimeError("Not expecting average of FacetCoordinate.") if mt.restriction: raise RuntimeError("Not expecting restriction of FacetCoordinate.") if self.integral_type in ("interior_facet", "exterior_facet"): (tdim,) = mt.terminal.ufl_shape if tdim == 0: raise RuntimeError("Vertices have no facet coordinates.") elif tdim == 1: warnings.warn( "Vertex coordinate is always 0, should get rid of this in UFL " "geometry lowering." ) return L.LiteralFloat(0.0) Xf = self.points_table(num_points) iq = self.symbols.quadrature_loop_index() assert 0 <= mt.flat_component < (tdim - 1) if num_points == 1: index = mt.flat_component elif tdim == 2: index = iq else: index = iq * (tdim - 1) + mt.flat_component return Xf[index] else: # Xf should be computed from X or x symbolically instead of # getting here raise RuntimeError("Expecting reference facet coordinate to be symbolically rewritten.") def jacobian(self, mt, tabledata, num_points): """Access a jacobian.""" if mt.averaged is not None: raise RuntimeError("Not expecting average of Jacobian.") return self.symbols.J_component(mt) def reference_cell_volume(self, mt, tabledata, access): """Access a reference cell volume.""" cellname = ufl.domain.extract_unique_domain(mt.terminal).ufl_cell().cellname() if cellname in ("interval", "triangle", "tetrahedron", "quadrilateral", "hexahedron"): return L.Symbol(f"{cellname}_reference_cell_volume", dtype=L.DataType.REAL) else: raise RuntimeError(f"Unhandled cell types {cellname}.") def reference_facet_volume(self, mt, tabledata, access): """Access a reference facet volume.""" cellname = ufl.domain.extract_unique_domain(mt.terminal).ufl_cell().cellname() if cellname in ("interval", "triangle", "tetrahedron", "quadrilateral", "hexahedron"): return L.Symbol(f"{cellname}_reference_facet_volume", dtype=L.DataType.REAL) else: raise RuntimeError(f"Unhandled cell types {cellname}.") def reference_normal(self, mt, tabledata, access): """Access a reference normal.""" cellname = ufl.domain.extract_unique_domain(mt.terminal).ufl_cell().cellname() if cellname in ("interval", "triangle", "tetrahedron", "quadrilateral", "hexahedron"): table = L.Symbol(f"{cellname}_reference_facet_normals", dtype=L.DataType.REAL) facet = self.symbols.entity("facet", mt.restriction) return table[facet][mt.component[0]] else: raise RuntimeError(f"Unhandled cell types {cellname}.") def cell_facet_jacobian(self, mt, tabledata, num_points): """Access a cell facet jacobian.""" cellname = 
ufl.domain.extract_unique_domain(mt.terminal).ufl_cell().cellname() if cellname in ("triangle", "tetrahedron", "quadrilateral", "hexahedron"): table = L.Symbol(f"{cellname}_reference_facet_jacobian", dtype=L.DataType.REAL) facet = self.symbols.entity("facet", mt.restriction) return table[facet][mt.component[0]][mt.component[1]] elif cellname == "interval": raise RuntimeError("The reference facet jacobian doesn't make sense for interval cell.") else: raise RuntimeError(f"Unhandled cell types {cellname}.") def reference_cell_edge_vectors(self, mt, tabledata, num_points): """Access a reference cell edge vector.""" cellname = ufl.domain.extract_unique_domain(mt.terminal).ufl_cell().cellname() if cellname in ("triangle", "tetrahedron", "quadrilateral", "hexahedron"): table = L.Symbol(f"{cellname}_reference_edge_vectors", dtype=L.DataType.REAL) return table[mt.component[0]][mt.component[1]] elif cellname == "interval": raise RuntimeError( "The reference cell edge vectors doesn't make sense for interval cell." ) else: raise RuntimeError(f"Unhandled cell types {cellname}.") def reference_facet_edge_vectors(self, mt, tabledata, num_points): """Access a reference facet edge vector.""" cellname = ufl.domain.extract_unique_domain(mt.terminal).ufl_cell().cellname() if cellname in ("tetrahedron", "hexahedron"): table = L.Symbol(f"{cellname}_reference_edge_vectors", dtype=L.DataType.REAL) facet = self.symbols.entity("facet", mt.restriction) return table[facet][mt.component[0]][mt.component[1]] elif cellname in ("interval", "triangle", "quadrilateral"): raise RuntimeError( "The reference cell facet edge vectors doesn't make sense for interval " "or triangle cell." ) else: raise RuntimeError(f"Unhandled cell types {cellname}.") def facet_orientation(self, mt, tabledata, num_points): """Access a facet orientation.""" cellname = ufl.domain.extract_unique_domain(mt.terminal).ufl_cell().cellname() if cellname not in ("interval", "triangle", "tetrahedron"): raise RuntimeError(f"Unhandled cell types {cellname}.") table = L.Symbol(f"{cellname}_facet_orientations", dtype=L.DataType.INT) facet = self.symbols.entity("facet", mt.restriction) return table[facet] def cell_vertices(self, mt, tabledata, num_points): """Access a cell vertex.""" # Get properties of domain domain = ufl.domain.extract_unique_domain(mt.terminal) gdim = domain.geometric_dimension() coordinate_element = domain.ufl_coordinate_element() # Get dimension and dofmap of scalar element assert isinstance(coordinate_element, basix.ufl._BlockedElement) assert coordinate_element.reference_value_shape == (gdim,) (ufl_scalar_element,) = set(coordinate_element.sub_elements) scalar_element = ufl_scalar_element assert scalar_element.reference_value_size == 1 and scalar_element.block_size == 1 vertex_scalar_dofs = scalar_element.entity_dofs[0] num_scalar_dofs = scalar_element.dim # Get dof and component (dof,) = vertex_scalar_dofs[mt.component[0]] component = mt.component[1] expr = self.symbols.domain_dof_access(dof, component, gdim, num_scalar_dofs, mt.restriction) return expr def cell_edge_vectors(self, mt, tabledata, num_points): """Access a cell edge vector.""" # Get properties of domain domain = ufl.domain.extract_unique_domain(mt.terminal) cellname = domain.ufl_cell().cellname() gdim = domain.geometric_dimension() coordinate_element = domain.ufl_coordinate_element() if cellname in ("triangle", "tetrahedron", "quadrilateral", "hexahedron"): pass elif cellname == "interval": raise RuntimeError( "The physical cell edge vectors doesn't make sense for interval 
cell." ) else: raise RuntimeError(f"Unhandled cell types {cellname}.") # Get dimension and dofmap of scalar element assert isinstance(coordinate_element, basix.ufl._BlockedElement) assert coordinate_element.reference_value_shape == (gdim,) (ufl_scalar_element,) = set(coordinate_element.sub_elements) scalar_element = ufl_scalar_element assert scalar_element.reference_value_size == 1 and scalar_element.block_size == 1 vertex_scalar_dofs = scalar_element.entity_dofs[0] num_scalar_dofs = scalar_element.dim # Get edge vertices edge = mt.component[0] vertex0, vertex1 = scalar_element.reference_topology[1][edge] # Get dofs and component (dof0,) = vertex_scalar_dofs[vertex0] (dof1,) = vertex_scalar_dofs[vertex1] component = mt.component[1] return self.symbols.domain_dof_access( dof0, component, gdim, num_scalar_dofs, mt.restriction ) - self.symbols.domain_dof_access(dof1, component, gdim, num_scalar_dofs, mt.restriction) def facet_edge_vectors(self, mt, tabledata, num_points): """Access a facet edge vector.""" # Get properties of domain domain = ufl.domain.extract_unique_domain(mt.terminal) cellname = domain.ufl_cell().cellname() gdim = domain.geometric_dimension() coordinate_element = domain.ufl_coordinate_element() if cellname in ("tetrahedron", "hexahedron"): pass elif cellname in ("interval", "triangle", "quadrilateral"): raise RuntimeError( f"The physical facet edge vectors doesn't make sense for {cellname} cell." ) else: raise RuntimeError(f"Unhandled cell types {cellname}.") # Get dimension and dofmap of scalar element assert isinstance(coordinate_element, basix.ufl._BlockedElement) assert coordinate_element.reference_value_shape == (gdim,) (ufl_scalar_element,) = set(coordinate_element.sub_elements) scalar_element = ufl_scalar_element assert scalar_element.reference_value_size == 1 and scalar_element.block_size == 1 scalar_element = ufl_scalar_element num_scalar_dofs = scalar_element.dim # Get edge vertices facet = self.symbols.entity("facet", mt.restriction) facet_edge = mt.component[0] facet_edge_vertices = L.Symbol(f"{cellname}_facet_edge_vertices", dtype=L.DataType.INT) vertex0 = facet_edge_vertices[facet][facet_edge][0] vertex1 = facet_edge_vertices[facet][facet_edge][1] # Get dofs and component component = mt.component[1] assert coordinate_element.embedded_superdegree == 1, "Assuming degree 1 element" dof0 = vertex0 dof1 = vertex1 expr = self.symbols.domain_dof_access( dof0, component, gdim, num_scalar_dofs, mt.restriction ) - self.symbols.domain_dof_access(dof1, component, gdim, num_scalar_dofs, mt.restriction) return expr def _pass(self, *args, **kwargs): """Return one.""" return 1 def table_access( self, tabledata: UniqueTableReferenceT, entity_type: str, restriction: str, quadrature_index: L.MultiIndex, dof_index: L.MultiIndex, ): """Access element table for given entity, quadrature point, and dof index. 
Args: tabledata: Table data object entity_type: Entity type ("cell", "facet", "vertex") restriction: Restriction ("+", "-") quadrature_index: Quadrature index dof_index: Dof index """ entity = self.symbols.entity(entity_type, restriction) iq_global_index = quadrature_index.global_index ic_global_index = dof_index.global_index qp = 0 # quadrature permutation symbols = [] if tabledata.is_uniform: entity = L.LiteralInt(0) if tabledata.is_piecewise: iq_global_index = L.LiteralInt(0) # FIXME: Hopefully tabledata is not permuted when applying sum # factorization if tabledata.is_permuted: qp = self.symbols.quadrature_permutation[0] if restriction == "-": qp = self.symbols.quadrature_permutation[1] if dof_index.dim == 1 and quadrature_index.dim == 1: symbols += [L.Symbol(tabledata.name, dtype=L.DataType.REAL)] return self.symbols.element_tables[tabledata.name][qp][entity][iq_global_index][ ic_global_index ], symbols else: FE = [] for i in range(dof_index.dim): factor = tabledata.tensor_factors[i] iq_i = quadrature_index.local_index(i) ic_i = dof_index.local_index(i) table = self.symbols.element_tables[factor.name][qp][entity][iq_i][ic_i] symbols += [L.Symbol(factor.name, dtype=L.DataType.REAL)] FE.append(table) return L.Product(FE), symbols ffcx-0.9.0/ffcx/codegeneration/backend.py000066400000000000000000000024501470142666300203350ustar00rootroot00000000000000# Copyright (C) 2011-2017 Martin Sandve Alnæs # # This file is part of FFCx. (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Collection of FFCx specific pieces for the code generation phase.""" from __future__ import annotations from ffcx.codegeneration.access import FFCXBackendAccess from ffcx.codegeneration.definitions import FFCXBackendDefinitions from ffcx.codegeneration.symbols import FFCXBackendSymbols from ffcx.ir.representation import ExpressionIR, IntegralIR class FFCXBackend: """Class collecting all aspects of the FFCx backend.""" def __init__(self, ir: IntegralIR | ExpressionIR, options): """Initialise.""" coefficient_numbering = ir.expression.coefficient_numbering coefficient_offsets = ir.expression.coefficient_offsets original_constant_offsets = ir.expression.original_constant_offsets self.symbols = FFCXBackendSymbols( coefficient_numbering, coefficient_offsets, original_constant_offsets ) self.access = FFCXBackendAccess( ir.expression.entity_type, ir.expression.integral_type, self.symbols, options ) self.definitions = FFCXBackendDefinitions( ir.expression.entity_type, ir.expression.integral_type, self.access, options ) ffcx-0.9.0/ffcx/codegeneration/codegeneration.py000066400000000000000000000040111470142666300217270ustar00rootroot00000000000000# Copyright (C) 2009-2017 Anders Logg, Martin Sandve Alnæs, Marie E. Rognes, # Kristian B. Oelgaard, and others # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Compiler stage 4: Code generation. This module implements the generation of C code for the body of each UFC function from an intermediate representation (IR). 
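A typical driver (sketch: ``ir`` and ``options`` come from the earlier
compiler stages, e.g. via ``ffcx.ir.representation.compute_ir`` and the
default FFCx option set) uses this module as:

    blocks = generate_code(ir, options)
    for declaration, implementation in blocks.integrals:
        ...  # append to the generated header/implementation files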
""" from __future__ import annotations import logging import typing import numpy.typing as npt from ffcx.codegeneration.C.expressions import generator as expression_generator from ffcx.codegeneration.C.file import generator as file_generator from ffcx.codegeneration.C.form import generator as form_generator from ffcx.codegeneration.C.integrals import generator as integral_generator from ffcx.ir.representation import DataIR logger = logging.getLogger("ffcx") class CodeBlocks(typing.NamedTuple): """Storage of code blocks of the form (declaration, implementation). Blocks for integrals, forms and expressions, and start and end of file output """ file_pre: list[tuple[str, str]] integrals: list[tuple[str, str]] forms: list[tuple[str, str]] expressions: list[tuple[str, str]] file_post: list[tuple[str, str]] def generate_code(ir: DataIR, options: dict[str, int | float | npt.DTypeLike]) -> CodeBlocks: """Generate code blocks from intermediate representation.""" logger.info(79 * "*") logger.info("Compiler stage 3: Generating code") logger.info(79 * "*") code_integrals = [integral_generator(integral_ir, options) for integral_ir in ir.integrals] code_forms = [form_generator(form_ir, options) for form_ir in ir.forms] code_expressions = [ expression_generator(expression_ir, options) for expression_ir in ir.expressions ] code_file_pre, code_file_post = file_generator(options) return CodeBlocks( file_pre=[code_file_pre], integrals=code_integrals, forms=code_forms, expressions=code_expressions, file_post=[code_file_post], ) ffcx-0.9.0/ffcx/codegeneration/definitions.py000066400000000000000000000232031470142666300212600ustar00rootroot00000000000000# Copyright (C) 2011-2023 Martin Sandve Alnæs, Igor A. Baratta # # This file is part of FFCx. (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """FFCx/UFC specific variable definitions.""" import logging from typing import Union import ufl import ffcx.codegeneration.lnodes as L from ffcx.ir.analysis.modified_terminals import ModifiedTerminal from ffcx.ir.elementtables import UniqueTableReferenceT from ffcx.ir.representationutils import QuadratureRule logger = logging.getLogger("ffcx") def create_quadrature_index(quadrature_rule, quadrature_index_symbol): """Create a multi index for the quadrature loop.""" ranges = [0] name = quadrature_index_symbol.name indices = [L.Symbol(name, dtype=L.DataType.INT)] if quadrature_rule: ranges = [quadrature_rule.weights.size] if quadrature_rule.has_tensor_factors: dim = len(quadrature_rule.tensor_factors) ranges = [factor[1].size for factor in quadrature_rule.tensor_factors] indices = [L.Symbol(name + f"{i}", dtype=L.DataType.INT) for i in range(dim)] return L.MultiIndex(indices, ranges) def create_dof_index(tabledata, dof_index_symbol): """Create a multi index for the coefficient dofs.""" name = dof_index_symbol.name if tabledata.has_tensor_factorisation: dim = len(tabledata.tensor_factors) ranges = [factor.values.shape[-1] for factor in tabledata.tensor_factors] indices = [L.Symbol(f"{name}{i}", dtype=L.DataType.INT) for i in range(dim)] else: ranges = [tabledata.values.shape[-1]] indices = [L.Symbol(name, dtype=L.DataType.INT)] return L.MultiIndex(indices, ranges) class FFCXBackendDefinitions: """FFCx specific code definitions.""" def __init__(self, entity_type: str, integral_type: str, access, options): """Initialise.""" # Store ir and options self.integral_type = integral_type self.entity_type = entity_type self.access = access self.options = options # called, depending on the first argument 
type. self.handler_lookup = { ufl.coefficient.Coefficient: self.coefficient, ufl.geometry.Jacobian: self._define_coordinate_dofs_lincomb, ufl.geometry.SpatialCoordinate: self.spatial_coordinate, ufl.constant.Constant: self.pass_through, ufl.geometry.CellVertices: self.pass_through, ufl.geometry.FacetEdgeVectors: self.pass_through, ufl.geometry.CellEdgeVectors: self.pass_through, ufl.geometry.CellFacetJacobian: self.pass_through, ufl.geometry.ReferenceCellVolume: self.pass_through, ufl.geometry.ReferenceFacetVolume: self.pass_through, ufl.geometry.ReferenceCellEdgeVectors: self.pass_through, ufl.geometry.ReferenceFacetEdgeVectors: self.pass_through, ufl.geometry.ReferenceNormal: self.pass_through, ufl.geometry.CellOrientation: self.pass_through, ufl.geometry.FacetOrientation: self.pass_through, } @property def symbols(self): """Return formatter.""" return self.access.symbols def get( self, mt: ModifiedTerminal, tabledata: UniqueTableReferenceT, quadrature_rule: QuadratureRule, access: L.Symbol, ) -> Union[L.Section, list]: """Return definition code for a terminal.""" # Call appropriate handler, depending on the type of terminal terminal = mt.terminal ttype = type(terminal) # Look for parent class of ttype or direct handler while ttype not in self.handler_lookup and ttype.__bases__: ttype = ttype.__bases__[0] # Get the handler from the lookup, or None if not found handler = self.handler_lookup.get(ttype) if handler is None: raise NotImplementedError(f"No handler for terminal type: {ttype}") # Call the handler return handler(mt, tabledata, quadrature_rule, access) def coefficient( self, mt: ModifiedTerminal, tabledata: UniqueTableReferenceT, quadrature_rule: QuadratureRule, access: L.Symbol, ) -> Union[L.Section, list]: """Return definition code for coefficients.""" # For applying tensor product to coefficients, we need to know if the coefficient # has a tensor factorisation and if the quadrature rule has a tensor factorisation. # If both are true, we can apply the tensor product to the coefficient. 
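        # (Illustrative sketch, not part of the FFCx sources: what the
        # generated loop below computes. At quadrature point q, a coefficient
        # value is the contraction of its dofs with the tabulated basis,
        # f[q] = sum_i w[i] * FE[q, i]; when FE factorises as
        # FE0[q0, i0] * FE1[q1, i1], the sum splits into cheaper
        # one-dimensional contractions, which is the point of the tensor
        # factorisation checked for here.)
        def _coefficient_contraction_sketch():
            import numpy as np

            rng = np.random.default_rng(0)
            FE = rng.random((4, 3))  # tabulated basis values, [points][dofs]
            w = rng.random(3)  # coefficient dofs
            return FE @ w  # f[q] = sum_i w[i] * FE[q, i]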
iq_symbol = self.symbols.quadrature_loop_index ic_symbol = self.symbols.coefficient_dof_sum_index iq = create_quadrature_index(quadrature_rule, iq_symbol) ic = create_dof_index(tabledata, ic_symbol) # Get properties of tables ttype = tabledata.ttype num_dofs = tabledata.values.shape[3] bs = tabledata.block_size begin = tabledata.offset end = begin + bs * (num_dofs - 1) + 1 if ttype == "zeros": logging.debug("Not expecting zero coefficients to get this far.") return [] # For a constant coefficient we reference the dofs directly, so no definition needed if ttype == "ones" and end - begin == 1: return [] assert begin < end # Get access to element table FE, tables = self.access.table_access(tabledata, self.entity_type, mt.restriction, iq, ic) dof_access: L.ArrayAccess = self.symbols.coefficient_dof_access( mt.terminal, (ic.global_index) * bs + begin ) declaration: list[L.Declaration] = [L.VariableDecl(access, 0.0)] body = [L.AssignAdd(access, dof_access * FE)] code = [L.create_nested_for_loops([ic], body)] name = type(mt.terminal).__name__ input = [dof_access.array, *tables] output = [access] annotations = [L.Annotation.fuse] # assert input and output are Symbol objects assert all(isinstance(i, L.Symbol) for i in input) assert all(isinstance(o, L.Symbol) for o in output) return L.Section(name, code, declaration, input, output, annotations) def _define_coordinate_dofs_lincomb( self, mt: ModifiedTerminal, tabledata: UniqueTableReferenceT, quadrature_rule: QuadratureRule, access: L.Symbol, ) -> Union[L.Section, list]: """Define x or J as a linear combination of coordinate dofs with given table data.""" # Get properties of domain domain = ufl.domain.extract_unique_domain(mt.terminal) coordinate_element = domain.ufl_coordinate_element() num_scalar_dofs = coordinate_element._sub_element.dim num_dofs = tabledata.values.shape[3] begin = tabledata.offset assert num_scalar_dofs == num_dofs # Find table name ttype = tabledata.ttype assert ttype != "zeros" assert ttype != "ones" # Get access to element table ic_symbol = self.symbols.coefficient_dof_sum_index iq_symbol = self.symbols.quadrature_loop_index ic = create_dof_index(tabledata, ic_symbol) iq = create_quadrature_index(quadrature_rule, iq_symbol) FE, tables = self.access.table_access(tabledata, self.entity_type, mt.restriction, iq, ic) dof_access = L.Symbol("coordinate_dofs", dtype=L.DataType.REAL) # coordinate dofs is always 3d dim = 3 offset = 0 if mt.restriction == "-": offset = num_scalar_dofs * dim code = [] declaration = [L.VariableDecl(access, 0.0)] body = [L.AssignAdd(access, dof_access[ic.global_index * dim + begin + offset] * FE)] code = [L.create_nested_for_loops([ic], body)] name = type(mt.terminal).__name__ output = [access] input = [dof_access, *tables] annotations = [L.Annotation.fuse] # assert input and output are Symbol objects assert all(isinstance(i, L.Symbol) for i in input) assert all(isinstance(o, L.Symbol) for o in output) return L.Section(name, code, declaration, input, output, annotations) def spatial_coordinate( self, mt: ModifiedTerminal, tabledata: UniqueTableReferenceT, quadrature_rule: QuadratureRule, access: L.Symbol, ) -> Union[L.Section, list]: """Return definition code for the physical spatial coordinates. If physical coordinates are given: No definition needed. 
If reference coordinates are given: x = sum_k xdof_k xphi_k(X) If reference facet coordinates are given: x = sum_k xdof_k xphi_k(Xf) """ if self.integral_type in ufl.custom_integral_types: # FIXME: Jacobian may need adjustment for custom_integral_types if mt.local_derivatives: logging.exception("FIXME: Jacobian in custom integrals is not implemented.") return [] else: return self._define_coordinate_dofs_lincomb(mt, tabledata, quadrature_rule, access) def jacobian( self, mt: ModifiedTerminal, tabledata: UniqueTableReferenceT, quadrature_rule: QuadratureRule, access: L.Symbol, ) -> Union[L.Section, list]: """Return definition code for the Jacobian of x(X).""" return self._define_coordinate_dofs_lincomb(mt, tabledata, quadrature_rule, access) def pass_through( self, mt: ModifiedTerminal, tabledata: UniqueTableReferenceT, quadrature_rule: QuadratureRule, access: L.Symbol, ) -> Union[L.Section, list]: """Return definition code for pass through terminals.""" return [] ffcx-0.9.0/ffcx/codegeneration/expression_generator.py000066400000000000000000000335371470142666300232250ustar00rootroot00000000000000# Copyright (C) 2019 Michal Habera # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Expression generator.""" import collections import logging from itertools import product from typing import Any import ufl import ffcx.codegeneration.lnodes as L from ffcx.codegeneration import geometry from ffcx.codegeneration.backend import FFCXBackend from ffcx.codegeneration.lnodes import LNode from ffcx.ir.representation import ExpressionIR logger = logging.getLogger("ffcx") class ExpressionGenerator: """Expression generator.""" def __init__(self, ir: ExpressionIR, backend: FFCXBackend): """Initialise.""" if len(list(ir.expression.integrand.keys())) != 1: raise RuntimeError("Only one set of points allowed for expression evaluation") self.ir = ir self.backend = backend self.scope: dict[Any, LNode] = {} self._ufl_names: set[Any] = set() self.symbol_counters: collections.defaultdict[Any, int] = collections.defaultdict(int) self.shared_symbols: dict[Any, Any] = {} self.quadrature_rule = next(iter(self.ir.expression.integrand.keys())) def generate(self): """Generate.""" parts = [] parts += self.generate_element_tables() # Generate the tables of geometry data that are needed parts += self.generate_geometry_tables() parts += self.generate_piecewise_partition() all_preparts = [] all_quadparts = [] preparts, quadparts = self.generate_quadrature_loop() all_preparts += preparts all_quadparts += quadparts # Collect parts before, during, and after quadrature loops parts += all_preparts parts += all_quadparts return L.StatementList(parts) def generate_geometry_tables(self): """Generate static tables of geometry data.""" # Currently we only support circumradius ufl_geometry = { ufl.geometry.ReferenceCellVolume: "reference_cell_volume", ufl.geometry.ReferenceNormal: "reference_facet_normals", } cells: dict[Any, set[Any]] = {t: set() for t in ufl_geometry.keys()} # type: ignore for integrand in self.ir.expression.integrand.values(): for attr in integrand["factorization"].nodes.values(): mt = attr.get("mt") if mt is not None: t = type(mt.terminal) if self.ir.expression.entity_type == "cell" and issubclass( t, ufl.geometry.GeometricFacetQuantity ): raise RuntimeError(f"Expressions for cells do not support {t}.") if t in ufl_geometry: cells[t].add( ufl.domain.extract_unique_domain(mt.terminal).ufl_cell().cellname() ) parts = [] for i, cell_list in cells.items(): for c in 
cell_list: parts.append(geometry.write_table(ufl_geometry[i], c)) return parts def generate_element_tables(self): """Generate tables of FE basis evaluated at specified points.""" parts = [] tables = self.ir.expression.unique_tables table_names = sorted(tables) for name in table_names: table = tables[name] symbol = L.Symbol(name, dtype=L.DataType.REAL) self.backend.symbols.element_tables[name] = symbol decl = L.ArrayDecl(symbol, sizes=table.shape, values=table, const=True) parts += [decl] # Add leading comment if there are any tables parts = L.commented_code_list( parts, [ "Precomputed values of basis functions", "FE* dimensions: [entities][points][dofs]", ], ) return parts def generate_quadrature_loop(self): """Generate quadrature loop for this quadrature rule. In the context of expressions quadrature loop is not accumulated. """ # Generate varying partition body = self.generate_varying_partition() body = L.commented_code_list( body, f"Points loop body setup quadrature loop {self.quadrature_rule.id()}" ) # Generate dofblock parts, some of this # will be placed before or after quadloop preparts, quadparts = self.generate_dofblock_partition() body += quadparts # Wrap body in loop or scope if not body: # Could happen for integral with everything zero and optimized away quadparts = [] else: iq = self.backend.symbols.quadrature_loop_index num_points = self.quadrature_rule.points.shape[0] quadparts = [L.ForRange(iq, 0, num_points, body=body)] return preparts, quadparts def generate_varying_partition(self): """Generate factors of blocks which are not cellwise constant.""" # Get annotated graph of factorisation F = self.ir.expression.integrand[self.quadrature_rule]["factorization"] arraysymbol = L.Symbol(f"sv_{self.quadrature_rule.id()}", dtype=L.DataType.SCALAR) parts = self.generate_partition(arraysymbol, F, "varying") parts = L.commented_code_list( parts, f"Unstructured varying computations for quadrature rule {self.quadrature_rule.id()}", ) return parts def generate_piecewise_partition(self): """Generate factors of blocks which are constant. I.e. do not depend on quadrature points). 
""" # Get annotated graph of factorisation F = self.ir.expression.integrand[self.quadrature_rule]["factorization"] arraysymbol = L.Symbol("sp", dtype=L.DataType.SCALAR) parts = self.generate_partition(arraysymbol, F, "piecewise") parts = L.commented_code_list(parts, "Unstructured piecewise computations") return parts def generate_dofblock_partition(self): """Generate assignments of blocks multiplied with their factors into final tensor A.""" block_contributions = self.ir.expression.integrand[self.quadrature_rule][ "block_contributions" ] preparts = [] quadparts = [] blocks = [ (blockmap, blockdata) for blockmap, contributions in sorted(block_contributions.items()) for blockdata in contributions ] for blockmap, blockdata in blocks: # Define code for block depending on mode block_preparts, block_quadparts = self.generate_block_parts(blockmap, blockdata) # Add definitions preparts.extend(block_preparts) # Add computations quadparts.extend(block_quadparts) return preparts, quadparts def generate_block_parts(self, blockmap, blockdata): """Generate and return code parts for a given block.""" # The parts to return preparts = [] quadparts = [] block_rank = len(blockmap) blockdims = tuple(len(dofmap) for dofmap in blockmap) ttypes = blockdata.ttypes if "zeros" in ttypes: raise RuntimeError("Not expecting zero arguments to be left in dofblock generation.") arg_indices = tuple(self.backend.symbols.argument_loop_index(i) for i in range(block_rank)) F = self.ir.expression.integrand[self.quadrature_rule]["factorization"] assert not blockdata.transposed, "Not handled yet" components = ufl.product(self.ir.expression.shape) num_points = self.quadrature_rule.points.shape[0] A_shape = [num_points, components] + self.ir.expression.tensor_shape A = self.backend.symbols.element_tensor iq = self.backend.symbols.quadrature_loop_index # Check if DOFs in dofrange are equally spaced. expand_loop = False for bm in blockmap: for a, b in zip(bm[1:-1], bm[2:]): if b - a != bm[1] - bm[0]: expand_loop = True break else: continue break if expand_loop: # If DOFs in dofrange are not equally spaced, then expand out the for loop for A_indices, B_indices in zip( product(*blockmap), product(*[range(len(b)) for b in blockmap]) ): B_indices = tuple([iq] + list(B_indices)) A_indices = tuple([iq] + A_indices) for fi_ci in blockdata.factor_indices_comp_indices: f = self.get_var(F.nodes[fi_ci[0]]["expression"]) arg_factors = self.get_arg_factors(blockdata, block_rank, B_indices) Brhs = L.float_product([f] + arg_factors) multi_index = L.MultiIndex([A_indices[0], fi_ci[1]] + A_indices[1:], A_shape) quadparts.append(L.AssignAdd(A[multi_index], Brhs)) else: # Prepend dimensions of dofmap block with free index # for quadrature points and expression components B_indices = tuple([iq] + list(arg_indices)) # Fetch code to access modified arguments # An access to FE table data arg_factors = self.get_arg_factors(blockdata, block_rank, B_indices) # TODO: handle non-contiguous dof ranges A_indices = [] for bm, index in zip(blockmap, arg_indices): # TODO: switch order here? 
(optionally) offset = bm[0] if len(bm) == 1: A_indices.append(index + offset) else: block_size = bm[1] - bm[0] A_indices.append(block_size * index + offset) A_indices = tuple([iq] + A_indices) # Multiply collected factors # For each component of the factor expression # add result inside quadloop body = [] for fi_ci in blockdata.factor_indices_comp_indices: f = self.get_var(F.nodes[fi_ci[0]]["expression"]) Brhs = L.float_product([f] + arg_factors) indices = [A_indices[0], fi_ci[1]] + list(A_indices[1:]) multi_index = L.MultiIndex(indices, A_shape) body.append(L.AssignAdd(A[multi_index], Brhs)) for i in reversed(range(block_rank)): body = L.ForRange(B_indices[i + 1], 0, blockdims[i], body=body) quadparts += [body] return preparts, quadparts def get_arg_factors(self, blockdata, block_rank, indices): """Get argument factors (i.e. blocks). Args: blockdata: block data block_rank: block rank indices: Indices used to index element tables """ arg_factors = [] for i in range(block_rank): mad = blockdata.ma_data[i] td = mad.tabledata mt = self.ir.expression.integrand[self.quadrature_rule]["modified_arguments"][ mad.ma_index ] table = self.backend.symbols.element_table( td, self.ir.expression.entity_type, mt.restriction ) assert td.ttype != "zeros" if td.ttype == "ones": arg_factor = L.LiteralFloat(1.0) else: arg_factor = table[indices[i + 1]] arg_factors.append(arg_factor) return arg_factors def new_temp_symbol(self, basename): """Create a new code symbol named basename + running counter.""" name = "%s%d" % (basename, self.symbol_counters[basename]) self.symbol_counters[basename] += 1 return L.Symbol(name, dtype=L.DataType.SCALAR) def get_var(self, v): """Get a variable.""" if v._ufl_is_literal_: return L.ufl_to_lnodes(v) f = self.scope.get(v) return f def generate_partition(self, symbol, F, mode): """Generate computations of factors of blocks.""" definitions = [] intermediates = [] for _, attr in F.nodes.items(): if attr["status"] != mode: continue v = attr["expression"] mt = attr.get("mt") if v._ufl_is_literal_: vaccess = L.ufl_to_lnodes(v) elif mt is not None: # All finite element based terminals have table data, as well # as some, but not all, of the symbolic geometric terminals tabledata = attr.get("tr") # Backend specific modified terminal translation vaccess = self.backend.access.get(mt, tabledata, 0) vdef = self.backend.definitions.get(mt, tabledata, 0, vaccess) if vdef: assert isinstance(vdef, L.Section) vdef = vdef.declarations + vdef.statements # Store definitions of terminals in list assert isinstance(vdef, list) definitions.extend(vdef) else: # Get previously visited operands vops = [self.get_var(op) for op in v.ufl_operands] # Mapping UFL operator to target language self._ufl_names.add(v._ufl_handler_name_) vexpr = L.ufl_to_lnodes(v, *vops) is_cond = isinstance(v, ufl.classes.Condition) dtype = L.DataType.BOOL if is_cond else L.DataType.SCALAR j = len(intermediates) vaccess = L.Symbol(f"{symbol.name}_{j}", dtype=dtype) intermediates.append(L.VariableDecl(vaccess, vexpr)) # Store access node for future reference self.scope[v] = vaccess # Join terminal computation, array of intermediate expressions, # and intermediate computations parts = [] parts += definitions parts += intermediates return parts ffcx-0.9.0/ffcx/codegeneration/geometry.py000066400000000000000000000124331470142666300206030ustar00rootroot00000000000000# Copyright (C) 2021 Matthew Scroggs # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Geometry.""" import basix 
import numpy as np import ffcx.codegeneration.lnodes as L def write_table(tablename, cellname): """Write a table.""" if tablename == "facet_edge_vertices": return facet_edge_vertices(tablename, cellname) if tablename == "reference_facet_jacobian": return reference_facet_jacobian(tablename, cellname) if tablename == "reference_cell_volume": return reference_cell_volume(tablename, cellname) if tablename == "reference_facet_volume": return reference_facet_volume(tablename, cellname) if tablename == "reference_edge_vectors": return reference_edge_vectors(tablename, cellname) if tablename == "facet_reference_edge_vectors": return facet_reference_edge_vectors(tablename, cellname) if tablename == "reference_facet_normals": return reference_facet_normals(tablename, cellname) if tablename == "facet_orientation": return facet_orientation(tablename, cellname) raise ValueError(f"Unknown geometry table name: {tablename}") def facet_edge_vertices(tablename, cellname): """Write facet edge vertices.""" celltype = getattr(basix.CellType, cellname) topology = basix.topology(celltype) triangle_edges = basix.topology(basix.CellType.triangle)[1] quadrilateral_edges = basix.topology(basix.CellType.quadrilateral)[1] if len(topology) != 4: raise ValueError("Can only get facet edges for 3D cells.") edge_vertices = [] for facet in topology[-2]: if len(facet) == 3: edge_vertices += [[[facet[i] for i in edge] for edge in triangle_edges]] elif len(facet) == 4: edge_vertices += [[[facet[i] for i in edge] for edge in quadrilateral_edges]] else: raise ValueError("Only triangular and quadrilateral faces supported.") out = np.array(edge_vertices, dtype=int) symbol = L.Symbol(f"{cellname}_{tablename}", dtype=L.DataType.INT) return L.ArrayDecl(symbol, values=out, const=True) def reference_facet_jacobian(tablename, cellname): """Write a reference facet jacobian.""" celltype = getattr(basix.CellType, cellname) out = basix.cell.facet_jacobians(celltype) symbol = L.Symbol(f"{cellname}_{tablename}", dtype=L.DataType.REAL) return L.ArrayDecl(symbol, values=out, const=True) def reference_cell_volume(tablename, cellname): """Write a reference cell volume.""" celltype = getattr(basix.CellType, cellname) out = basix.cell.volume(celltype) symbol = L.Symbol(f"{cellname}_{tablename}", dtype=L.DataType.REAL) return L.VariableDecl(symbol, out) def reference_facet_volume(tablename, cellname): """Write a reference facet volume.""" celltype = getattr(basix.CellType, cellname) volumes = basix.cell.facet_reference_volumes(celltype) for i in volumes[1:]: if not np.isclose(i, volumes[0]): raise ValueError("Reference facet volume not supported for this cell type.") symbol = L.Symbol(f"{cellname}_{tablename}", dtype=L.DataType.REAL) return L.VariableDecl(symbol, volumes[0]) def reference_edge_vectors(tablename, cellname): """Write reference edge vectors.""" celltype = getattr(basix.CellType, cellname) topology = basix.topology(celltype) geometry = basix.geometry(celltype) edge_vectors = [geometry[j] - geometry[i] for i, j in topology[1]] out = np.array(edge_vectors) symbol = L.Symbol(f"{cellname}_{tablename}", dtype=L.DataType.REAL) return L.ArrayDecl(symbol, values=out, const=True) def facet_reference_edge_vectors(tablename, cellname): """Write facet reference edge vectors.""" celltype = getattr(basix.CellType, cellname) topology = basix.topology(celltype) geometry = basix.geometry(celltype) triangle_edges = basix.topology(basix.CellType.triangle)[1] quadrilateral_edges = basix.topology(basix.CellType.quadrilateral)[1] if len(topology) != 4: raise 
ValueError("Can only get facet edges for 3D cells.") edge_vectors = [] for facet in topology[-2]: if len(facet) == 3: edge_vectors += [geometry[facet[j]] - geometry[facet[i]] for i, j in triangle_edges] elif len(facet) == 4: edge_vectors += [ geometry[facet[j]] - geometry[facet[i]] for i, j in quadrilateral_edges ] else: raise ValueError("Only triangular and quadrilateral faces supported.") out = np.array(edge_vectors) symbol = L.Symbol(f"{cellname}_{tablename}", dtype=L.DataType.REAL) return L.ArrayDecl(symbol, values=out, const=True) def reference_facet_normals(tablename, cellname): """Write reference facet normals.""" celltype = getattr(basix.CellType, cellname) out = basix.cell.facet_outward_normals(celltype) symbol = L.Symbol(f"{cellname}_{tablename}", dtype=L.DataType.REAL) return L.ArrayDecl(symbol, values=out, const=True) def facet_orientation(tablename, cellname): """Write facet orientations.""" celltype = getattr(basix.CellType, cellname) out = basix.cell.facet_orientations(celltype) symbol = L.Symbol(f"{cellname}_{tablename}", dtype=L.DataType.REAL) return L.ArrayDecl(symbol, values=out, const=True) ffcx-0.9.0/ffcx/codegeneration/integral_generator.py000066400000000000000000000526121470142666300226260ustar00rootroot00000000000000# Copyright (C) 2015-2024 Martin Sandve Alnæs, Michal Habera, Igor Baratta, Chris Richardson # # Modified by Jørgen S. Dokken, 2024 # # This file is part of FFCx. (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Integral generator.""" import collections import logging from numbers import Integral from typing import Any import ufl import ffcx.codegeneration.lnodes as L from ffcx.codegeneration import geometry from ffcx.codegeneration.definitions import create_dof_index, create_quadrature_index from ffcx.codegeneration.optimizer import optimize from ffcx.ir.elementtables import piecewise_ttypes from ffcx.ir.integral import BlockDataT from ffcx.ir.representationutils import QuadratureRule logger = logging.getLogger("ffcx") def extract_dtype(v, vops: list[Any]): """Extract dtype from ufl expression v and its operands.""" dtypes = [] for op in vops: if hasattr(op, "dtype"): dtypes.append(op.dtype) elif hasattr(op, "symbol"): dtypes.append(op.symbol.dtype) elif isinstance(op, Integral): dtypes.append(L.DataType.INT) else: raise RuntimeError(f"Not expecting this type of operand {type(op)}") is_cond = isinstance(v, ufl.classes.Condition) return L.DataType.BOOL if is_cond else L.merge_dtypes(dtypes) class IntegralGenerator: """Integral generator.""" def __init__(self, ir, backend): """Initialise.""" # Store ir self.ir = ir # Backend specific plugin with attributes # - symbols: for translating ufl operators to target language # - definitions: for defining backend specific variables # - access: for accessing backend specific variables self.backend = backend # Set of operator names code has been generated for, used in the # end for selecting necessary includes self._ufl_names = set() # Initialize lookup tables for variable scopes self.init_scopes() # Cache self.temp_symbols = {} # Set of counters used for assigning names to intermediate # variables self.symbol_counters = collections.defaultdict(int) def init_scopes(self): """Initialize variable scope dicts.""" # Reset variables, separate sets for each quadrature rule self.scopes = { quadrature_rule: {} for quadrature_rule in self.ir.expression.integrand.keys() } self.scopes[None] = {} def set_var(self, quadrature_rule, v, vaccess): """Set a new variable in variable scope dicts. 
Scope is determined by quadrature_rule which identifies the quadrature loop scope or None if outside quadrature loops. Args: quadrature_rule: Quadrature rule v: the ufl expression vaccess: the LNodes expression to access the value in the code """ self.scopes[quadrature_rule][v] = vaccess def get_var(self, quadrature_rule, v): """Lookup ufl expression v in variable scope dicts. Scope is determined by quadrature rule which identifies the quadrature loop scope or None if outside quadrature loops. If v is not found in quadrature loop scope, the piecewise scope (None) is checked. Returns the LNodes expression to access the value in the code. """ if v._ufl_is_literal_: return L.ufl_to_lnodes(v) # quadrature loop scope f = self.scopes[quadrature_rule].get(v) # piecewise scope if f is None: f = self.scopes[None].get(v) return f def new_temp_symbol(self, basename): """Create a new code symbol named basename + running counter.""" name = "%s%d" % (basename, self.symbol_counters[basename]) self.symbol_counters[basename] += 1 return L.Symbol(name, dtype=L.DataType.SCALAR) def get_temp_symbol(self, tempname, key): """Get a temporary symbol.""" key = (tempname,) + key s = self.temp_symbols.get(key) defined = s is not None if not defined: s = self.new_temp_symbol(tempname) self.temp_symbols[key] = s return s, defined def generate(self): """Generate entire tabulate_tensor body. Assumes that the code returned from here will be wrapped in a context that matches a suitable version of the UFC tabulate_tensor signatures. """ # Assert that scopes are empty: expecting this to be called only # once assert not any(d for d in self.scopes.values()) parts = [] # Generate the tables of quadrature points and weights parts += self.generate_quadrature_tables() # Generate the tables of basis function values and # pre-integrated blocks parts += self.generate_element_tables() # Generate the tables of geometry data that are needed parts += self.generate_geometry_tables() # Loop generation code will produce parts to go before # quadloops, to define the quadloops, and to go after the # quadloops all_preparts = [] all_quadparts = [] # Pre-definitions are collected across all quadrature loops to # improve re-use and avoid name clashes for rule in self.ir.expression.integrand.keys(): # Generate code to compute piecewise constant scalar factors all_preparts += self.generate_piecewise_partition(rule) # Generate code to integrate reusable blocks of final # element tensor all_quadparts += self.generate_quadrature_loop(rule) # Collect parts before, during, and after quadrature loops parts += all_preparts parts += all_quadparts return L.StatementList(parts) def generate_quadrature_tables(self): """Generate static tables of quadrature points and weights.""" parts = [] # No quadrature tables for custom (given argument) or point # (evaluation in single vertex) skip = ufl.custom_integral_types + ufl.measure.point_integral_types if self.ir.expression.integral_type in skip: return parts # Loop over quadrature rules for quadrature_rule, _ in self.ir.expression.integrand.items(): # Generate quadrature weights array wsym = self.backend.symbols.weights_table(quadrature_rule) parts += [L.ArrayDecl(wsym, values=quadrature_rule.weights, const=True)] # Add leading comment if there are any tables parts = L.commented_code_list(parts, "Quadrature rules") return parts def generate_geometry_tables(self): """Generate static tables of geometry data.""" ufl_geometry = { ufl.geometry.FacetEdgeVectors: "facet_edge_vertices", ufl.geometry.CellFacetJacobian: 
"reference_facet_jacobian", ufl.geometry.ReferenceCellVolume: "reference_cell_volume", ufl.geometry.ReferenceFacetVolume: "reference_facet_volume", ufl.geometry.ReferenceCellEdgeVectors: "reference_edge_vectors", ufl.geometry.ReferenceFacetEdgeVectors: "facet_reference_edge_vectors", ufl.geometry.ReferenceNormal: "reference_facet_normals", ufl.geometry.FacetOrientation: "facet_orientation", } cells: dict[Any, set[Any]] = {t: set() for t in ufl_geometry.keys()} # type: ignore for integrand in self.ir.expression.integrand.values(): for attr in integrand["factorization"].nodes.values(): mt = attr.get("mt") if mt is not None: t = type(mt.terminal) if t in ufl_geometry: cells[t].add( ufl.domain.extract_unique_domain(mt.terminal).ufl_cell().cellname() ) parts = [] for i, cell_list in cells.items(): for c in cell_list: parts.append(geometry.write_table(ufl_geometry[i], c)) return parts def generate_element_tables(self): """Generate static tables. With precomputed element basis function values in quadrature points. """ parts = [] tables = self.ir.expression.unique_tables table_types = self.ir.expression.unique_table_types if self.ir.expression.integral_type in ufl.custom_integral_types: # Define only piecewise tables table_names = [name for name in sorted(tables) if table_types[name] in piecewise_ttypes] else: # Define all tables table_names = sorted(tables) for name in table_names: table = tables[name] parts += self.declare_table(name, table) # Add leading comment if there are any tables parts = L.commented_code_list( parts, [ "Precomputed values of basis functions and precomputations", "FE* dimensions: [permutation][entities][points][dofs]", ], ) return parts def declare_table(self, name, table): """Declare a table. If the dof dimensions of the table have dof rotations, apply these rotations. 
""" table_symbol = L.Symbol(name, dtype=L.DataType.REAL) self.backend.symbols.element_tables[name] = table_symbol return [L.ArrayDecl(table_symbol, values=table, const=True)] def generate_quadrature_loop(self, quadrature_rule: QuadratureRule): """Generate quadrature loop with for this quadrature_rule.""" # Generate varying partition definitions, intermediates_0 = self.generate_varying_partition(quadrature_rule) # Generate dofblock parts, some of this will be placed before or after quadloop tensor_comp, intermediates_fw = self.generate_dofblock_partition(quadrature_rule) assert all([isinstance(tc, L.Section) for tc in tensor_comp]) # Check if we only have Section objects inputs = [] for definition in definitions: assert isinstance(definition, L.Section) inputs += definition.output # Create intermediates section output = [] declarations = [] for fw in intermediates_fw: assert isinstance(fw, L.VariableDecl) output += [fw.symbol] declarations += [L.VariableDecl(fw.symbol, 0)] intermediates_0 += [L.Assign(fw.symbol, fw.value)] intermediates = [L.Section("Intermediates", intermediates_0, declarations, inputs, output)] iq_symbol = self.backend.symbols.quadrature_loop_index iq = create_quadrature_index(quadrature_rule, iq_symbol) code = definitions + intermediates + tensor_comp code = optimize(code, quadrature_rule) return [L.create_nested_for_loops([iq], code)] def generate_piecewise_partition(self, quadrature_rule): """Generate a piecewise partition.""" # Get annotated graph of factorisation F = self.ir.expression.integrand[quadrature_rule]["factorization"] arraysymbol = L.Symbol(f"sp_{quadrature_rule.id()}", dtype=L.DataType.SCALAR) return self.generate_partition(arraysymbol, F, "piecewise", None) def generate_varying_partition(self, quadrature_rule): """Generate a varying partition.""" # Get annotated graph of factorisation F = self.ir.expression.integrand[quadrature_rule]["factorization"] arraysymbol = L.Symbol(f"sv_{quadrature_rule.id()}", dtype=L.DataType.SCALAR) return self.generate_partition(arraysymbol, F, "varying", quadrature_rule) def generate_partition(self, symbol, F, mode, quadrature_rule): """Generate a partition.""" definitions = [] intermediates = [] for i, attr in F.nodes.items(): if attr["status"] != mode: continue v = attr["expression"] # Generate code only if the expression is not already in cache if not self.get_var(quadrature_rule, v): if v._ufl_is_literal_: vaccess = L.ufl_to_lnodes(v) elif mt := attr.get("mt"): tabledata = attr.get("tr") # Backend specific modified terminal translation vaccess = self.backend.access.get(mt, tabledata, quadrature_rule) vdef = self.backend.definitions.get(mt, tabledata, quadrature_rule, vaccess) if vdef: assert isinstance(vdef, L.Section) # Only add if definition is unique. 
# This can happen when using sub-meshes if vdef not in definitions: definitions += [vdef] else: # Get previously visited operands vops = [self.get_var(quadrature_rule, op) for op in v.ufl_operands] dtype = extract_dtype(v, vops) # Mapping UFL operator to target language self._ufl_names.add(v._ufl_handler_name_) vexpr = L.ufl_to_lnodes(v, *vops) j = len(intermediates) vaccess = L.Symbol(f"{symbol.name}_{j}", dtype=dtype) intermediates.append(L.VariableDecl(vaccess, vexpr)) # Store access node for future reference self.set_var(quadrature_rule, v, vaccess) # Optimize definitions definitions = optimize(definitions, quadrature_rule) return definitions, intermediates def generate_dofblock_partition(self, quadrature_rule: QuadratureRule): """Generate a dofblock partition.""" block_contributions = self.ir.expression.integrand[quadrature_rule]["block_contributions"] quadparts = [] blocks = [ (blockmap, blockdata) for blockmap, contributions in sorted(block_contributions.items()) for blockdata in contributions ] block_groups = collections.defaultdict(list) # Group loops by blockmap, in Vector elements each component has # a different blockmap for blockmap, blockdata in blocks: scalar_blockmap = [] assert len(blockdata.ma_data) == len(blockmap) for i, b in enumerate(blockmap): bs = blockdata.ma_data[i].tabledata.block_size offset = blockdata.ma_data[i].tabledata.offset b = tuple([(idx - offset) // bs for idx in b]) scalar_blockmap.append(b) block_groups[tuple(scalar_blockmap)].append(blockdata) intermediates = [] for blockmap in block_groups: block_quadparts, intermediate = self.generate_block_parts( quadrature_rule, blockmap, block_groups[blockmap] ) intermediates += intermediate # Add computations quadparts.extend(block_quadparts) return quadparts, intermediates def get_arg_factors(self, blockdata, block_rank, quadrature_rule, iq, indices): """Get arg factors.""" arg_factors = [] tables = [] for i in range(block_rank): mad = blockdata.ma_data[i] td = mad.tabledata scope = self.ir.expression.integrand[quadrature_rule]["modified_arguments"] mt = scope[mad.ma_index] arg_tables = [] # Translate modified terminal to code # TODO: Move element table access out of backend? # Not using self.backend.access.argument() here # now because it assumes too much about indices. assert td.ttype != "zeros" if td.ttype == "ones": arg_factor = 1 else: # Assuming B sparsity follows element table sparsity arg_factor, arg_tables = self.backend.access.table_access( td, self.ir.expression.entity_type, mt.restriction, iq, indices[i] ) tables += arg_tables arg_factors.append(arg_factor) return arg_factors, tables def generate_block_parts( self, quadrature_rule: QuadratureRule, blockmap: tuple, blocklist: list[BlockDataT] ): """Generate and return code parts for a given block. Returns parts occurring before, inside, and after the quadrature loop identified by the quadrature rule. Should be called with quadrature_rule=None for quadloop-independent blocks. 
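In pseudo-code, a rank-2 block contributes A[i, j] += fw * arg_factor_i * arg_factor_j inside the argument loops, where fw is the scalar factor multiplied by the quadrature weight.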
""" # The parts to return quadparts: list[L.LNode] = [] intermediates: list[L.LNode] = [] tables = [] vars = [] # RHS expressions grouped by LHS "dofmap" rhs_expressions = collections.defaultdict(list) block_rank = len(blockmap) iq_symbol = self.backend.symbols.quadrature_loop_index iq = create_quadrature_index(quadrature_rule, iq_symbol) for blockdata in blocklist: B_indices = [] for i in range(block_rank): table_ref = blockdata.ma_data[i].tabledata symbol = self.backend.symbols.argument_loop_index(i) index = create_dof_index(table_ref, symbol) B_indices.append(index) ttypes = blockdata.ttypes if "zeros" in ttypes: raise RuntimeError( "Not expecting zero arguments to be left in dofblock generation." ) if len(blockdata.factor_indices_comp_indices) > 1: raise RuntimeError("Code generation for non-scalar integrals unsupported") # We have scalar integrand here, take just the factor index factor_index = blockdata.factor_indices_comp_indices[0][0] # Get factor expression F = self.ir.expression.integrand[quadrature_rule]["factorization"] v = F.nodes[factor_index]["expression"] f = self.get_var(quadrature_rule, v) # Quadrature weight was removed in representation, add it back now if self.ir.expression.integral_type in ufl.custom_integral_types: weights = self.backend.symbols.custom_weights_table weight = weights[iq.global_index] else: weights = self.backend.symbols.weights_table(quadrature_rule) weight = weights[iq.global_index] # Define fw = f * weight fw_rhs = L.float_product([f, weight]) if not isinstance(fw_rhs, L.Product): fw = fw_rhs else: # Define and cache scalar temp variable key = (quadrature_rule, factor_index, blockdata.all_factors_piecewise) fw, defined = self.get_temp_symbol("fw", key) if not defined: input = [f, weight] # filter only L.Symbol in input input = [i for i in input if isinstance(i, L.Symbol)] output = [fw] # assert input and output are Symbol objects assert all(isinstance(i, L.Symbol) for i in input) assert all(isinstance(o, L.Symbol) for o in output) intermediates += [L.VariableDecl(fw, fw_rhs)] var = fw if isinstance(fw, L.Symbol) else fw.array vars += [var] assert not blockdata.transposed, "Not handled yet" # Fetch code to access modified arguments arg_factors, table = self.get_arg_factors( blockdata, block_rank, quadrature_rule, iq, B_indices ) tables += table # Define B_rhs = fw * arg_factors B_rhs = L.float_product([fw] + arg_factors) A_indices = [] for i in range(block_rank): index = B_indices[i] tabledata = blockdata.ma_data[i].tabledata offset = tabledata.offset if len(blockmap[i]) == 1: A_indices.append(index.global_index + offset) else: block_size = blockdata.ma_data[i].tabledata.block_size A_indices.append(block_size * index.global_index + offset) rhs_expressions[tuple(A_indices)].append(B_rhs) # List of statements to keep in the inner loop keep = collections.defaultdict(list) for indices in rhs_expressions: keep[indices] = rhs_expressions[indices] body: list[L.LNode] = [] A = self.backend.symbols.element_tensor A_shape = self.ir.expression.tensor_shape for indices in keep: multi_index = L.MultiIndex(list(indices), A_shape) for expression in keep[indices]: body.append(L.AssignAdd(A[multi_index], expression)) # reverse B_indices B_indices = B_indices[::-1] body = [L.create_nested_for_loops(B_indices, body)] input = [*vars, *tables] output = [A] # Make sure we don't have repeated symbols in input input = list(set(input)) # assert input and output are Symbol objects assert all(isinstance(i, L.Symbol) for i in input) assert all(isinstance(o, L.Symbol) for o in 
output) annotations = [] if len(B_indices) > 1: annotations.append(L.Annotation.licm) quadparts += [L.Section("Tensor Computation", body, [], input, output, annotations)] return quadparts, intermediates ffcx-0.9.0/ffcx/codegeneration/jit.py000066400000000000000000000326571470142666300175470ustar00rootroot00000000000000# Copyright (C) 2004-2019 Garth N. Wells # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Just-in-time compilation.""" from __future__ import annotations import importlib import io import logging import os import re import sys import sysconfig import tempfile import time from contextlib import redirect_stdout from pathlib import Path import cffi import numpy as np import numpy.typing as npt import ufl import ffcx import ffcx.naming from ffcx.codegeneration.C.file_template import libraries as _libraries logger = logging.getLogger("ffcx") root_logger = logging.getLogger() # Get declarations directly from ufcx.h file_dir = os.path.dirname(os.path.abspath(__file__)) with open(file_dir + "/ufcx.h") as f: ufcx_h = "".join(f.readlines()) # Emulate C preprocessor on __STDC_NO_COMPLEX__ if sys.platform.startswith("win32"): # Remove macro statements and content ufcx_h = re.sub( r"\#ifndef __STDC_NO_COMPLEX__.*?\#endif // __STDC_NO_COMPLEX__", "", ufcx_h, flags=re.DOTALL, ) else: # Remove only macros keeping content ufcx_h = ufcx_h.replace("#ifndef __STDC_NO_COMPLEX__", "") ufcx_h = ufcx_h.replace("#endif // __STDC_NO_COMPLEX__", "") header = ufcx_h.split("<HEADER_DECL>")[1].split("</HEADER_DECL>")[0].strip(" /\n") header = header.replace("{", "{{").replace("}", "}}") UFC_HEADER_DECL = header + "\n" UFC_FORM_DECL = "\n".join(re.findall("typedef struct ufcx_form.*?ufcx_form;", ufcx_h, re.DOTALL)) UFC_INTEGRAL_DECL = "\n".join( re.findall(r"typedef void ?\(ufcx_tabulate_tensor_float32\).*?\);", ufcx_h, re.DOTALL) ) UFC_INTEGRAL_DECL += "\n".join( re.findall(r"typedef void ?\(ufcx_tabulate_tensor_float64\).*?\);", ufcx_h, re.DOTALL) ) UFC_INTEGRAL_DECL += "\n".join( re.findall(r"typedef void ?\(ufcx_tabulate_tensor_complex64\).*?\);", ufcx_h, re.DOTALL) ) UFC_INTEGRAL_DECL += "\n".join( re.findall(r"typedef void ?\(ufcx_tabulate_tensor_complex128\).*?\);", ufcx_h, re.DOTALL) ) UFC_INTEGRAL_DECL += "\n".join( re.findall("typedef struct ufcx_integral.*?ufcx_integral;", ufcx_h, re.DOTALL) ) UFC_EXPRESSION_DECL = "\n".join( re.findall("typedef struct ufcx_expression.*?ufcx_expression;", ufcx_h, re.DOTALL) ) def _compute_option_signature(options): """Return options signature (some options should not affect signature).""" return str(sorted(options.items())) def get_cached_module(module_name, object_names, cache_dir, timeout): """Look for an existing C file and wait for compilation, or if it does not exist, create it.""" cache_dir = Path(cache_dir) c_filename = cache_dir.joinpath(module_name).with_suffix(".c") ready_name = c_filename.with_suffix(".c.cached") # Ensure cache dir exists cache_dir.mkdir(exist_ok=True, parents=True) try: # Create C file with exclusive access with open(c_filename, "x"): pass return None, None except FileExistsError: logger.info("Cached C file already exists: " + str(c_filename)) finder = importlib.machinery.FileFinder( str(cache_dir), (importlib.machinery.ExtensionFileLoader, importlib.machinery.EXTENSION_SUFFIXES), ) finder.invalidate_caches() # Now, wait for ready for i in range(timeout): if os.path.exists(ready_name): spec = finder.find_spec(module_name) if spec is None: raise ModuleNotFoundError("Unable to find JIT module.")
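# Load the already-compiled extension module from the spec found above and pull the requested objects out of its cffi-generated "lib" namespace.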
compiled_module = importlib.util.module_from_spec(spec) spec.loader.exec_module(compiled_module) compiled_objects = [getattr(compiled_module.lib, name) for name in object_names] return compiled_objects, compiled_module logger.info(f"Waiting for {ready_name} to appear.") time.sleep(1) raise TimeoutError( "JIT compilation timed out, probably due to a failed previous compile. " f"Try cleaning cache (e.g. remove {c_filename}) or increase timeout option." ) def _compilation_signature(cffi_extra_compile_args, cffi_debug): """Compute the compilation-inputs part of the signature. Used to avoid cache conflicts across Python versions, architectures, installs. - SOABI includes platform, Python version, debug flags - CFLAGS includes prefixes, arch targets """ if sys.platform.startswith("win32"): # NOTE: SOABI not defined on win32, EXT_SUFFIX contains e.g. '.cp312-win_amd64.pyd' return ( str(cffi_extra_compile_args) + str(cffi_debug) + str(sysconfig.get_config_var("EXT_SUFFIX")) ) else: return ( str(cffi_extra_compile_args) + str(cffi_debug) + str(sysconfig.get_config_var("CFLAGS")) + str(sysconfig.get_config_var("SOABI")) ) def compile_forms( forms: list[ufl.Form], options: dict = {}, cache_dir: Path | None = None, timeout: int = 10, cffi_extra_compile_args: list[str] = [], cffi_verbose: bool = False, cffi_debug: bool = False, cffi_libraries: list[str] = [], visualise: bool = False, ): """Compile a list of UFL forms into UFC Python objects. Args: forms: List of ufl.form to compile. options: Options cache_dir: Cache directory timeout: Timeout cffi_extra_compile_args: Extra compilation args for CFFI cffi_verbose: Use verbose compile cffi_debug: Use compiler debug mode cffi_libraries: libraries to use with compiler visualise: Toggle visualisation """ p = ffcx.options.get_options(options) # Get a signature for these forms module_name = "libffcx_forms_" + ffcx.naming.compute_signature( forms, _compute_option_signature(p) + _compilation_signature(cffi_extra_compile_args, cffi_debug), ) form_names = [ffcx.naming.form_name(form, i, module_name) for i, form in enumerate(forms)] if cache_dir is not None: cache_dir = Path(cache_dir) obj, mod = get_cached_module(module_name, form_names, cache_dir, timeout) if obj is not None: return obj, mod, (None, None) else: cache_dir = Path(tempfile.mkdtemp()) try: decl = ( UFC_HEADER_DECL.format(np.dtype(p["scalar_type"]).name) # type: ignore + UFC_INTEGRAL_DECL + UFC_FORM_DECL ) form_template = "extern ufcx_form {name};\n" for name in form_names: decl += form_template.format(name=name) impl = _compile_objects( decl, forms, form_names, module_name, p, cache_dir, cffi_extra_compile_args, cffi_verbose, cffi_debug, cffi_libraries, visualise=visualise, ) except Exception as e: try: # remove c file so that it will not timeout next time c_filename = cache_dir.joinpath(module_name + ".c") os.replace(c_filename, c_filename.with_suffix(".c.failed")) except Exception: pass raise e obj, module = _load_objects(cache_dir, module_name, form_names) return obj, module, (decl, impl) def compile_expressions( expressions: list[tuple[ufl.Expr, npt.NDArray[np.floating]]], options: dict = {}, cache_dir: Path | None = None, timeout: int = 10, cffi_extra_compile_args: list[str] = [], cffi_verbose: bool = False, cffi_debug: bool = False, cffi_libraries: list[str] = [], visualise: bool = False, ): """Compile a list of UFL expressions into UFC Python objects. Args: expressions: List of (UFL expression, evaluation points). 
options: Options cache_dir: Cache directory timeout: Timeout cffi_extra_compile_args: Extra compilation args for CFFI cffi_verbose: Use verbose compile cffi_debug: Use compiler debug mode cffi_libraries: libraries to use with compiler visualise: Toggle visualisation """ p = ffcx.options.get_options(options) module_name = "libffcx_expressions_" + ffcx.naming.compute_signature( expressions, _compute_option_signature(p) + _compilation_signature(cffi_extra_compile_args, cffi_debug), ) expr_names = [ ffcx.naming.expression_name(expression, module_name) for expression in expressions ] if cache_dir is not None: cache_dir = Path(cache_dir) obj, mod = get_cached_module(module_name, expr_names, cache_dir, timeout) if obj is not None: return obj, mod, (None, None) else: cache_dir = Path(tempfile.mkdtemp()) try: decl = ( UFC_HEADER_DECL.format(np.dtype(p["scalar_type"]).name) # type: ignore + UFC_INTEGRAL_DECL + UFC_FORM_DECL + UFC_EXPRESSION_DECL ) expression_template = "extern ufcx_expression {name};\n" for name in expr_names: decl += expression_template.format(name=name) impl = _compile_objects( decl, expressions, expr_names, module_name, p, cache_dir, cffi_extra_compile_args, cffi_verbose, cffi_debug, cffi_libraries, visualise=visualise, ) except Exception as e: try: # remove c file so that it will not timeout next time c_filename = cache_dir.joinpath(module_name + ".c") os.replace(c_filename, c_filename.with_suffix(".c.failed")) except Exception: pass raise e obj, module = _load_objects(cache_dir, module_name, expr_names) return obj, module, (decl, impl) def _compile_objects( decl, ufl_objects, object_names, module_name, options, cache_dir, cffi_extra_compile_args, cffi_verbose, cffi_debug, cffi_libraries, visualise: bool = False, ): import ffcx.compiler libraries = _libraries + cffi_libraries if cffi_libraries is not None else _libraries # JIT uses module_name as prefix, which is needed to make names of all struct/function # unique across modules _, code_body = ffcx.compiler.compile_ufl_objects( ufl_objects, prefix=module_name, options=options, visualise=visualise ) # Raise error immediately prior to compilation if no support for C99 # _Complex. Doing this here allows FFCx to be used for complex codegen on # Windows. 
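# (That is, the check is deferred from import time to compile time, so FFCx itself still imports and generates code on Windows; only JIT compilation with a complex scalar type is rejected.)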
if sys.platform.startswith("win32"): if np.issubdtype(options["scalar_type"], np.complexfloating): raise NotImplementedError("win32 platform does not support C99 _Complex numbers") elif isinstance(options["scalar_type"], str) and "complex" in options["scalar_type"]: raise NotImplementedError("win32 platform does not support C99 _Complex numbers") # Compile in C17 mode if sys.platform.startswith("win32"): cffi_base_compile_args = ["-std:c17"] else: cffi_base_compile_args = ["-std=c17"] cffi_final_compile_args = cffi_base_compile_args + cffi_extra_compile_args ffibuilder = cffi.FFI() ffibuilder.set_source( module_name, code_body, include_dirs=[ffcx.codegeneration.get_include_path()], extra_compile_args=cffi_final_compile_args, libraries=libraries, ) ffibuilder.cdef(decl) c_filename = cache_dir.joinpath(module_name + ".c") ready_name = c_filename.with_suffix(".c.cached") # Compile (ensuring that compile dir exists) cache_dir.mkdir(exist_ok=True, parents=True) logger.info(79 * "#") logger.info("Calling JIT C compiler") logger.info(79 * "#") t0 = time.time() f = io.StringIO() # Temporarily set root logger handlers to string buffer only # since CFFI logs into root logger old_handlers = root_logger.handlers.copy() root_logger.handlers = [logging.StreamHandler(f)] with redirect_stdout(f): ffibuilder.compile(tmpdir=cache_dir, verbose=True, debug=cffi_debug) s = f.getvalue() if cffi_verbose: print(s) logger.info(f"JIT C compiler finished in {time.time() - t0:.4f}") # Create a "status ready" file. If this fails, it is an error, # because it should not exist yet. # Copy the stdout verbose output of the build into the ready file fd = open(ready_name, "x") fd.write(s) fd.close() # Copy back the original handlers (in case someone is logging into # root logger and has custom handlers) root_logger.handlers = old_handlers return code_body def _load_objects(cache_dir, module_name, object_names): # Create module finder that searches the compile path finder = importlib.machinery.FileFinder( str(cache_dir), (importlib.machinery.ExtensionFileLoader, importlib.machinery.EXTENSION_SUFFIXES), ) # Find module. Clear search cache to be sure dynamically created # (new) modules are found finder.invalidate_caches() spec = finder.find_spec(module_name) if spec is None: raise ModuleNotFoundError("Unable to find JIT module.") # Load module compiled_module = importlib.util.module_from_spec(spec) spec.loader.exec_module(compiled_module) compiled_objects = [] for name in object_names: obj = getattr(compiled_module.lib, name) compiled_objects.append(obj) return compiled_objects, compiled_module ffcx-0.9.0/ffcx/codegeneration/lnodes.py000066400000000000000000000736541470142666300202500ustar00rootroot00000000000000# Copyright (C) 2013-2023 Martin Sandve Alnæs, Chris Richardson # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """LNodes. LNodes is intended as a minimal generic language description. Formatting is done later, depending on the target language. 
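For example, an assignment such as A[i] += w * phi is represented as an AssignAdd node nested inside ForRange loops; a later, target-specific formatting pass turns the tree into source code.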
Supported: Floating point (and complex) and integer variables and multidimensional arrays Range loops Simple arithmetic, +-*/ Math operations Logic conditions Comments Not supported: Pointers Function Calls Flow control (if, switch, while) Booleans Strings """ import numbers from collections.abc import Sequence from enum import Enum from typing import Optional import numpy as np import ufl class PRECEDENCE: """An enum-like class for operator precedence levels.""" HIGHEST = 0 LITERAL = 0 SYMBOL = 0 SUBSCRIPT = 2 NOT = 3 NEG = 3 MUL = 4 DIV = 4 ADD = 5 SUB = 5 LT = 7 LE = 7 GT = 7 GE = 7 EQ = 8 NE = 8 AND = 11 OR = 12 CONDITIONAL = 13 ASSIGN = 13 LOWEST = 15 def is_zero_lexpr(lexpr): """Check if an expression is zero.""" return (isinstance(lexpr, LiteralFloat) and lexpr.value == 0.0) or ( isinstance(lexpr, LiteralInt) and lexpr.value == 0 ) def is_one_lexpr(lexpr): """Check if an expression is one.""" return (isinstance(lexpr, LiteralFloat) and lexpr.value == 1.0) or ( isinstance(lexpr, LiteralInt) and lexpr.value == 1 ) def is_negative_one_lexpr(lexpr): """Check if an expression is negative one.""" return (isinstance(lexpr, LiteralFloat) and lexpr.value == -1.0) or ( isinstance(lexpr, LiteralInt) and lexpr.value == -1 ) def float_product(factors): """Build product of float factors. Simplify ones and returning 1.0 if empty sequence. """ factors = [f for f in factors if not is_one_lexpr(f)] if len(factors) == 0: return LiteralFloat(1.0) elif len(factors) == 1: return factors[0] else: return Product(factors) class DataType(Enum): """Representation of data types for variables in LNodes. These can be REAL (same type as geometry), SCALAR (same type as tensor), or INT (for entity indices etc.) """ REAL = 0 SCALAR = 1 INT = 2 BOOL = 3 NONE = 4 def merge_dtypes(dtypes: list[DataType]): """Promote dtype to SCALAR or REAL if either argument matches.""" if DataType.NONE in dtypes: raise ValueError(f"Invalid DataType in LNodes {dtypes}") if DataType.SCALAR in dtypes: return DataType.SCALAR elif DataType.REAL in dtypes: return DataType.REAL elif DataType.INT in dtypes: return DataType.INT elif DataType.BOOL in dtypes: return DataType.BOOL else: raise ValueError(f"Can't get dtype for operation with {dtypes}") class LNode: """Base class for all AST nodes.""" def __eq__(self, other): """Check for equality.""" return NotImplemented def __ne__(self, other): """Check for inequality.""" return NotImplemented class LExpr(LNode): """Base class for all expressions. All subtypes should define a 'precedence' class attribute. 
""" dtype = DataType.NONE def __getitem__(self, indices): """Get an item.""" return ArrayAccess(self, indices) def __neg__(self): """Negate.""" if isinstance(self, LiteralFloat): return LiteralFloat(-self.value) if isinstance(self, LiteralInt): return LiteralInt(-self.value) return Neg(self) def __add__(self, other): """Add.""" other = as_lexpr(other) if is_zero_lexpr(self): return other if is_zero_lexpr(other): return self if isinstance(other, Neg): return Sub(self, other.arg) return Add(self, other) def __radd__(self, other): """Add.""" other = as_lexpr(other) if is_zero_lexpr(self): return other if is_zero_lexpr(other): return self if isinstance(self, Neg): return Sub(other, self.arg) return Add(other, self) def __sub__(self, other): """Subtract.""" other = as_lexpr(other) if is_zero_lexpr(self): return -other if is_zero_lexpr(other): return self if isinstance(other, Neg): return Add(self, other.arg) if isinstance(self, LiteralInt) and isinstance(other, LiteralInt): return LiteralInt(self.value - other.value) return Sub(self, other) def __rsub__(self, other): """Subtract.""" other = as_lexpr(other) if is_zero_lexpr(self): return other if is_zero_lexpr(other): return -self if isinstance(self, Neg): return Add(other, self.arg) return Sub(other, self) def __mul__(self, other): """Multiply.""" other = as_lexpr(other) if is_zero_lexpr(self): return self if is_zero_lexpr(other): return other if is_one_lexpr(self): return other if is_one_lexpr(other): return self if is_negative_one_lexpr(other): return Neg(self) if is_negative_one_lexpr(self): return Neg(other) if isinstance(self, LiteralInt) and isinstance(other, LiteralInt): return LiteralInt(self.value * other.value) return Mul(self, other) def __rmul__(self, other): """Multiply.""" other = as_lexpr(other) if is_zero_lexpr(self): return self if is_zero_lexpr(other): return other if is_one_lexpr(self): return other if is_one_lexpr(other): return self if is_negative_one_lexpr(other): return Neg(self) if is_negative_one_lexpr(self): return Neg(other) return Mul(other, self) def __div__(self, other): """Divide.""" other = as_lexpr(other) if is_zero_lexpr(other): raise ValueError("Division by zero!") if is_zero_lexpr(self): return self return Div(self, other) def __rdiv__(self, other): """Divide.""" other = as_lexpr(other) if is_zero_lexpr(self): raise ValueError("Division by zero!") if is_zero_lexpr(other): return other return Div(other, self) # TODO: Error check types? 
__truediv__ = __div__ __rtruediv__ = __rdiv__ __floordiv__ = __div__ __rfloordiv__ = __rdiv__ class LExprOperator(LExpr): """Base class for all expression operators.""" sideeffect = False class LExprTerminal(LExpr): """Base class for all expression terminals.""" sideeffect = False class LiteralFloat(LExprTerminal): """A floating point literal value.""" precedence = PRECEDENCE.LITERAL def __init__(self, value): """Initialise.""" assert isinstance(value, (float, complex)) self.value = value if isinstance(value, complex): self.dtype = DataType.SCALAR else: self.dtype = DataType.REAL def __eq__(self, other): """Check equality.""" return isinstance(other, LiteralFloat) and self.value == other.value def __float__(self): """Convert to float.""" return float(self.value) def __repr__(self): """Representation.""" return str(self.value) class LiteralInt(LExprTerminal): """An integer literal value.""" precedence = PRECEDENCE.LITERAL def __init__(self, value): """Initialise.""" assert isinstance(value, (int, np.number)) self.value = value self.dtype = DataType.INT def __eq__(self, other): """Check equality.""" return isinstance(other, LiteralInt) and self.value == other.value def __hash__(self): """Hash.""" return hash(self.value) def __repr__(self): """Representation.""" return str(self.value) class Symbol(LExprTerminal): """A named symbol.""" precedence = PRECEDENCE.SYMBOL def __init__(self, name: str, dtype): """Initialise.""" assert isinstance(name, str) assert name.replace("_", "").isalnum() self.name = name self.dtype = dtype def __eq__(self, other): """Check equality.""" return isinstance(other, Symbol) and self.name == other.name def __hash__(self): """Hash.""" return hash(self.name) def __repr__(self): """Representation.""" return self.name class MultiIndex(LExpr): """A multi-index for accessing tensors flattened in memory.""" precedence = PRECEDENCE.SYMBOL def __init__(self, symbols: list, sizes: list): """Initialise.""" self.dtype = DataType.INT self.sizes = sizes self.symbols = [as_lexpr(sym) for sym in symbols] for sym in self.symbols: assert sym.dtype == DataType.INT dim = len(sizes) if dim == 0: self.global_index: LExpr = LiteralInt(0) else: stride = [np.prod(sizes[i:]) for i in range(dim)] + [LiteralInt(1)] self.global_index = Sum(n * sym for n, sym in zip(stride[1:], symbols)) @property def dim(self): """Dimension of the multi-index.""" return len(self.sizes) def size(self): """Size of the multi-index.""" return np.prod(self.sizes) def local_index(self, idx): """Get the local index.""" assert idx < len(self.symbols) return self.symbols[idx] def intersection(self, other): """Get the intersection.""" symbols = [] sizes = [] for sym, size in zip(self.symbols, self.sizes): if sym in other.symbols: i = other.symbols.index(sym) assert other.sizes[i] == size symbols.append(sym) sizes.append(size) return MultiIndex(symbols, sizes) def union(self, other): """Get the union. 
Note: Result may depend on order a.union(b) != b.union(a) """ symbols = self.symbols.copy() sizes = self.sizes.copy() for sym, size in zip(other.symbols, other.sizes): if sym in symbols: i = symbols.index(sym) assert sizes[i] == size else: symbols.append(sym) sizes.append(size) return MultiIndex(symbols, sizes) def difference(self, other): """Get the difference.""" symbols = [] sizes = [] for idx, size in zip(self.symbols, self.sizes): if idx not in other.symbols: symbols.append(idx) sizes.append(size) return MultiIndex(symbols, sizes) def __hash__(self): """Hash.""" return hash(self.global_index.__repr__) class PrefixUnaryOp(LExprOperator): """Base class for unary operators.""" def __init__(self, arg): """Initialise.""" self.arg = as_lexpr(arg) def __eq__(self, other): """Check equality.""" return isinstance(other, type(self)) and self.arg == other.arg class BinOp(LExprOperator): """A binary operator.""" def __init__(self, lhs, rhs): """Initialise.""" self.lhs = as_lexpr(lhs) self.rhs = as_lexpr(rhs) def __eq__(self, other): """Check equality.""" return isinstance(other, type(self)) and self.lhs == other.lhs and self.rhs == other.rhs def __hash__(self): """Hash.""" return hash(self.lhs) + hash(self.rhs) def __repr__(self): """Representation.""" return f"({self.lhs} {self.op} {self.rhs})" class ArithmeticBinOp(BinOp): """An artithmetic binary operator.""" def __init__(self, lhs, rhs): """Initialise.""" self.lhs = as_lexpr(lhs) self.rhs = as_lexpr(rhs) self.dtype = merge_dtypes([self.lhs.dtype, self.rhs.dtype]) class NaryOp(LExprOperator): """Base class for special n-ary operators.""" op = "" def __init__(self, args): """Initialise.""" self.args = [as_lexpr(arg) for arg in args] self.dtype = self.args[0].dtype for arg in self.args: self.dtype = merge_dtypes([self.dtype, arg.dtype]) def __eq__(self, other): """Check equality.""" return ( isinstance(other, type(self)) and len(self.args) == len(other.args) and all(a == b for a, b in zip(self.args, other.args)) ) def __repr__(self) -> str: """Representation.""" return f"{self.op} ".join(f"{i} " for i in self.args) def __hash__(self): """Hash.""" return hash(tuple(self.args)) class Neg(PrefixUnaryOp): """Negation operator.""" precedence = PRECEDENCE.NEG op = "-" def __init__(self, arg): """Initialise.""" self.arg = as_lexpr(arg) self.dtype = self.arg.dtype class Not(PrefixUnaryOp): """Not operator.""" precedence = PRECEDENCE.NOT op = "!" 
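# Illustrative sketch (not itself part of the node set): the arithmetic overloads on LExpr assemble the classes below into expression trees, e.g. with a = Symbol("a", DataType.REAL) and b = Symbol("b", DataType.REAL), the expression a * b + 2.0 builds Add(Mul(a, b), LiteralFloat(2.0)), with dtypes merged at each ArithmeticBinOp.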
class Add(ArithmeticBinOp): """Add operator.""" precedence = PRECEDENCE.ADD op = "+" class Sub(ArithmeticBinOp): """Subtract operator.""" precedence = PRECEDENCE.SUB op = "-" class Mul(ArithmeticBinOp): """Multiply operator.""" precedence = PRECEDENCE.MUL op = "*" class Div(ArithmeticBinOp): """Division operator.""" precedence = PRECEDENCE.DIV op = "/" class EQ(BinOp): """Equality operator.""" precedence = PRECEDENCE.EQ op = "==" class NE(BinOp): """Inequality operator.""" precedence = PRECEDENCE.NE op = "!=" class LT(BinOp): """Less than operator.""" precedence = PRECEDENCE.LT op = "<" class GT(BinOp): """Greater than operator.""" precedence = PRECEDENCE.GT op = ">" class LE(BinOp): """Less than or equal to operator.""" precedence = PRECEDENCE.LE op = "<=" class GE(BinOp): """Greater than or equal to operator.""" precedence = PRECEDENCE.GE op = ">=" class And(BinOp): """And operator.""" precedence = PRECEDENCE.AND op = "&&" class Or(BinOp): """Or operator.""" precedence = PRECEDENCE.OR op = "||" class Sum(NaryOp): """Sum of any number of operands.""" precedence = PRECEDENCE.ADD op = "+" class Product(NaryOp): """Product of any number of operands.""" precedence = PRECEDENCE.MUL op = "*" class MathFunction(LExprOperator): """A Math Function, with any arguments.""" precedence = PRECEDENCE.HIGHEST def __init__(self, func, args): """Initialise.""" self.function = func self.args = [as_lexpr(arg) for arg in args] self.dtype = self.args[0].dtype def __eq__(self, other): """Check equality.""" return ( isinstance(other, type(self)) and self.function == other.function and len(self.args) == len(other.args) and all(a == b for a, b in zip(self.args, other.args)) ) class AssignOp(BinOp): """Base class for assignment operators.""" precedence = PRECEDENCE.ASSIGN sideeffect = True def __init__(self, lhs, rhs): """Initialise.""" assert isinstance(lhs, LNode) BinOp.__init__(self, lhs, rhs) class Assign(AssignOp): """Assign operator.""" op = "=" class AssignAdd(AssignOp): """Assign add operator.""" op = "+=" class AssignSub(AssignOp): """Assign subtract operator.""" op = "-=" class AssignMul(AssignOp): """Assign multiply operator.""" op = "*=" class AssignDiv(AssignOp): """Assign division operator.""" op = "/=" class ArrayAccess(LExprOperator): """Array access.""" precedence = PRECEDENCE.SUBSCRIPT def __init__(self, array, indices): """Initialise.""" # Typecheck array argument if isinstance(array, Symbol): self.array = array self.dtype = array.dtype elif isinstance(array, ArrayDecl): self.array = array.symbol self.dtype = array.symbol.dtype else: raise ValueError(f"Unexpected array type {type(array).__name__}") # Allow expressions or literals as indices if not isinstance(indices, (list, tuple)): indices = (indices,) self.indices = tuple(as_lexpr(i) for i in indices) # Early error checking for negative array dimensions if any(isinstance(i, int) and i < 0 for i in self.indices): raise ValueError("Index value < 0.") # Additional dimension checks possible if we get an ArrayDecl instead of just a name if isinstance(array, ArrayDecl): if len(self.indices) != len(array.sizes): raise ValueError("Invalid number of indices.") ints = (int, LiteralInt) if any( (isinstance(i, ints) and isinstance(d, ints) and int(i) >= int(d)) for i, d in zip(self.indices, array.sizes) ): raise ValueError("Index value >= array dimension.") def __getitem__(self, indices): """Handle nested expr[i][j].""" if isinstance(indices, list): indices = tuple(indices) elif not isinstance(indices, tuple): indices = (indices,) return 
ArrayAccess(self.array, self.indices + indices) def __eq__(self, other): """Check equality.""" return ( isinstance(other, type(self)) and self.array == other.array and self.indices == other.indices ) def __hash__(self): """Hash.""" return hash(self.array) def __repr__(self): """Representation.""" return str(self.array) + "[" + ", ".join(str(i) for i in self.indices) + "]" class Conditional(LExprOperator): """Conditional.""" precedence = PRECEDENCE.CONDITIONAL def __init__(self, condition, true, false): """Initialise.""" self.condition = as_lexpr(condition) self.true = as_lexpr(true) self.false = as_lexpr(false) self.dtype = merge_dtypes([self.true.dtype, self.false.dtype]) def __eq__(self, other): """Check equality.""" return ( isinstance(other, type(self)) and self.condition == other.condition and self.true == other.true and self.false == other.false ) def as_lexpr(node): """Typechecks and wraps an object as a valid LExpr. Accepts LExpr nodes, treats int and float as literals. """ if isinstance(node, LExpr): return node elif isinstance(node, numbers.Integral): return LiteralInt(node) elif isinstance(node, numbers.Real): return LiteralFloat(node) else: raise RuntimeError(f"Unexpected LExpr type {type(node)}:\n{node}") class Statement(LNode): """Make an expression into a statement.""" def __init__(self, expr): """Initialise.""" self.expr = as_lexpr(expr) def __eq__(self, other): """Check equality.""" return isinstance(other, type(self)) and self.expr == other.expr def __hash__(self) -> int: """Hash.""" return hash(self.expr) def as_statement(node): """Perform type checking on node and wrap in a suitable statement type if necessary.""" if isinstance(node, StatementList) and len(node.statements) == 1: # Cleans up the expression tree a bit return node.statements[0] elif isinstance(node, Statement): # No-op return node elif isinstance(node, LExprOperator): if node.sideeffect: # Special case for using assignment expressions as statements return Statement(node) else: raise RuntimeError( f"Trying to create a statement of lexprOperator type {type(node)}:\n{node}" ) elif isinstance(node, list): # Convenience case for list of statements if len(node) == 1: # Cleans up the expression tree a bit return as_statement(node[0]) else: return StatementList(node) elif isinstance(node, Section): return node else: raise RuntimeError(f"Unexpected Statement type {type(node)}:\n{node}") class Annotation(Enum): """Annotation.""" fuse = 1 # fuse loops in section unroll = 2 # unroll loop in section licm = 3 # loop invariant code motion factorize = 4 # apply sum factorization class Declaration(Statement): """Base class for all declarations.""" def __init__(self, symbol): """Initialise.""" self.symbol = symbol def __eq__(self, other): """Check equality.""" return isinstance(other, type(self)) and self.symbol == other.symbol def is_declaration(node) -> bool: """Check if a node is a declaration.""" return isinstance(node, VariableDecl) or isinstance(node, ArrayDecl) class Section(LNode): """A section of code with a name and a list of statements.""" def __init__( self, name: str, statements: list[LNode], declarations: Sequence[Declaration], input: Optional[list[Symbol]] = None, output: Optional[list[Symbol]] = None, annotations: Optional[list[Annotation]] = None, ): """Initialise.""" self.name = name self.statements = [as_statement(st) for st in statements] self.annotations = annotations or [] self.input = input or [] self.declarations = declarations or [] self.output = output or [] for decl in self.declarations: assert 
is_declaration(decl) if decl.symbol not in self.output: self.output.append(decl.symbol) def __eq__(self, other): """Check equality.""" attributes = ("name", "input", "output", "annotations", "statements") return isinstance(other, type(self)) and all( getattr(self, name) == getattr(other, name) for name in attributes ) class StatementList(LNode): """A simple sequence of statements.""" def __init__(self, statements): """Initialise.""" self.statements = [as_statement(st) for st in statements] def __eq__(self, other): """Check equality.""" return isinstance(other, type(self)) and self.statements == other.statements def __hash__(self) -> int: """Hash.""" return hash(tuple(self.statements)) def __repr__(self): """Representation.""" return f"StatementList({self.statements})" class Comment(Statement): """Line comment(s) used for annotating the generated code with human readable remarks.""" def __init__(self, comment): """Initialise.""" assert isinstance(comment, str) self.comment = comment def __eq__(self, other): """Check equality.""" return isinstance(other, type(self)) and self.comment == other.comment def commented_code_list(code, comments): """Add comment to code list if the list is not empty.""" if isinstance(code, LNode): code = [code] assert isinstance(code, list) if code: if not isinstance(comments, (list, tuple)): comments = [comments] comments = [Comment(c) for c in comments] code = comments + code return code # Type and variable declarations class VariableDecl(Declaration): """Declare a variable, optionally define initial value.""" def __init__(self, symbol, value=None): """Initialise.""" assert isinstance(symbol, Symbol) assert symbol.dtype is not None self.symbol = symbol if value is not None: value = as_lexpr(value) self.value = value def __eq__(self, other): """Check equality.""" return ( isinstance(other, type(self)) and self.symbol == other.symbol and self.value == other.value ) class ArrayDecl(Declaration): """A declaration or definition of an array. Note that just setting values=0 is sufficient to initialize the entire array to zero. Otherwise use nested lists of lists to represent multidimensional array values to initialize to. """ def __init__(self, symbol, sizes=None, values=None, const=False): """Initialise.""" assert isinstance(symbol, Symbol) self.symbol = symbol assert symbol.dtype if sizes is None: assert values is not None sizes = values.shape if isinstance(sizes, int): sizes = (sizes,) self.sizes = tuple(sizes) if values is None: assert sizes is not None # NB! No type checking, assuming nested lists of literal values. Not applying as_lexpr.
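# For example, ArrayDecl(sym, sizes=(2, 2), values=[[1.0, 0.0], [0.0, 1.0]]) stores the nested list as a 2x2 numpy array, while passing a numpy array as values also infers sizes from its shape.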
if isinstance(values, (list, tuple)): self.values = np.asarray(values) else: self.values = values self.const = const self.dtype = symbol.dtype def __eq__(self, other): """Check equality.""" attributes = ("dtype", "symbol", "sizes", "values") return isinstance(other, type(self)) and all( getattr(self, name) == getattr(other, name) for name in attributes ) def __hash__(self) -> int: """Hash.""" return hash(self.symbol) def is_simple_inner_loop(code): """Check if code is a simple inner loop.""" if isinstance(code, ForRange) and is_simple_inner_loop(code.body): return True if isinstance(code, Statement) and isinstance(code.expr, AssignOp): return True return False def depth(code) -> int: """Get depth of code.""" if isinstance(code, ForRange): return 1 + depth(code.body) if isinstance(code, StatementList): return max([depth(c) for c in code.statements]) return 0 class ForRange(Statement): """Slightly higher-level for loop assuming incrementing an index over a range.""" def __init__(self, index, begin, end, body): """Initialise.""" assert isinstance(index, Symbol) or isinstance(index, MultiIndex) self.index = index self.begin = as_lexpr(begin) self.end = as_lexpr(end) assert isinstance(body, list) self.body = StatementList(body) def as_tuple(self): """Convert to a tuple.""" return (self.index, self.begin, self.end, self.body) def __eq__(self, other): """Check equality.""" attributes = ("index", "begin", "end", "body") return isinstance(other, type(self)) and all( getattr(self, name) == getattr(other, name) for name in attributes ) def __hash__(self) -> int: """Hash.""" return hash(self.as_tuple()) def _math_function(op, *args): """Get a math function.""" name = op._ufl_handler_name_ dtype = args[0].dtype if name in ("conj", "real") and dtype == DataType.REAL: assert len(args) == 1 return args[0] if name == "imag" and dtype == DataType.REAL: assert len(args) == 1 return LiteralFloat(0.0) return MathFunction(name, args) # Lookup table for handler to call when the ufl_to_lnodes method (below) is # called, depending on the first argument type.
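# Each handler receives the UFL node itself followed by its already-translated operands; e.g. the ufl.algebra.Sum entry maps translated operands (a, b) to the LNodes expression a + b.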
_ufl_call_lookup = { ufl.constantvalue.IntValue: lambda x: LiteralInt(int(x)), ufl.constantvalue.FloatValue: lambda x: LiteralFloat(float(x)), ufl.constantvalue.ComplexValue: lambda x: LiteralFloat(x.value()), ufl.constantvalue.Zero: lambda x: LiteralFloat(0.0), ufl.algebra.Product: lambda x, a, b: a * b, ufl.algebra.Sum: lambda x, a, b: a + b, ufl.algebra.Division: lambda x, a, b: a / b, ufl.algebra.Abs: _math_function, ufl.algebra.Power: _math_function, ufl.algebra.Real: _math_function, ufl.algebra.Imag: _math_function, ufl.algebra.Conj: _math_function, ufl.classes.GT: lambda x, a, b: GT(a, b), ufl.classes.GE: lambda x, a, b: GE(a, b), ufl.classes.EQ: lambda x, a, b: EQ(a, b), ufl.classes.NE: lambda x, a, b: NE(a, b), ufl.classes.LT: lambda x, a, b: LT(a, b), ufl.classes.LE: lambda x, a, b: LE(a, b), ufl.classes.AndCondition: lambda x, a, b: And(a, b), ufl.classes.OrCondition: lambda x, a, b: Or(a, b), ufl.classes.NotCondition: lambda x, a: Not(a), ufl.classes.Conditional: lambda x, c, t, f: Conditional(c, t, f), ufl.classes.MinValue: _math_function, ufl.classes.MaxValue: _math_function, ufl.mathfunctions.Sqrt: _math_function, ufl.mathfunctions.Ln: _math_function, ufl.mathfunctions.Exp: _math_function, ufl.mathfunctions.Cos: _math_function, ufl.mathfunctions.Sin: _math_function, ufl.mathfunctions.Tan: _math_function, ufl.mathfunctions.Cosh: _math_function, ufl.mathfunctions.Sinh: _math_function, ufl.mathfunctions.Tanh: _math_function, ufl.mathfunctions.Acos: _math_function, ufl.mathfunctions.Asin: _math_function, ufl.mathfunctions.Atan: _math_function, ufl.mathfunctions.Erf: _math_function, ufl.mathfunctions.Atan2: _math_function, ufl.mathfunctions.MathFunction: _math_function, ufl.mathfunctions.BesselJ: _math_function, ufl.mathfunctions.BesselY: _math_function, } def ufl_to_lnodes(operator, *args): """Call appropriate handler, depending on the type of operator.""" optype = type(operator) if optype in _ufl_call_lookup: return _ufl_call_lookup[optype](operator, *args) else: raise RuntimeError(f"Missing lookup for expr type {optype}.") def create_nested_for_loops(indices: list[MultiIndex], body): """Create nested for loops over list of indices. The depth of the nested for loops is equal to the sub-indices for all MultiIndex combined. """ ranges = [r for idx in indices for r in idx.sizes] indices = [idx.local_index(i) for idx in indices for i in range(len(idx.sizes))] depth = len(ranges) for i in reversed(range(depth)): body = ForRange(indices[i], 0, ranges[i], body=[body]) return body ffcx-0.9.0/ffcx/codegeneration/optimizer.py000066400000000000000000000147721470142666300210020ustar00rootroot00000000000000"""Optimizer.""" from collections import defaultdict from typing import Union import ffcx.codegeneration.lnodes as L from ffcx.ir.representationutils import QuadratureRule def optimize(code: list[L.LNode], quadrature_rule: QuadratureRule) -> list[L.LNode]: """Optimize code. Args: code: List of LNodes to optimize. quadrature_rule: TODO. Returns: Optimized list of LNodes. """ # Fuse sections with the same name and same annotations code = fuse_sections(code, "Coefficient") code = fuse_sections(code, "Jacobian") for i, section in enumerate(code): if isinstance(section, L.Section): if L.Annotation.fuse in section.annotations: section = fuse_loops(section) if L.Annotation.licm in section.annotations: section = licm(section, quadrature_rule) code[i] = section return code def fuse_sections(code: list[L.LNode], name: str) -> list[L.LNode]: """Fuse sections with the same name. 
Args: code: List of LNodes to fuse. name: Common name used by the sections that should be fused Returns: Fused list of LNodes. """ statements: list[L.LNode] = [] indices: list[int] = [] input: list[L.Symbol] = [] output: list[L.Symbol] = [] declarations: list[L.Declaration] = [] annotations: list[L.Annotation] = [] for i, section in enumerate(code): if isinstance(section, L.Section): if section.name == name: declarations.extend(section.declarations) statements.extend(section.statements) indices.append(i) input.extend(section.input) output.extend(section.output) annotations = section.annotations # Remove duplicated inputs input = list(set(input)) # Remove duplicated outputs output = list(set(output)) section = L.Section(name, statements, declarations, input, output, annotations) # Replace the first section with the fused section code = code.copy() if indices: code[indices[0]] = section # Remove the other sections code = [c for i, c in enumerate(code) if i not in indices[1:]] return code def fuse_loops(code: L.Section) -> L.Section: """Fuse loops with the same range and same annotations. Args: code: List of LNodes to fuse. Returns: Fused list of LNodes. """ loops = defaultdict(list) output_code = [] for statement in code.statements: if isinstance(statement, L.ForRange): id = (statement.index, statement.begin, statement.end) loops[id].append(statement.body) else: output_code.append(statement) for range, body in loops.items(): output_code.append(L.ForRange(*range, body)) return L.Section(code.name, output_code, code.declarations, code.input, code.output) def get_statements(statement: Union[L.Statement, L.StatementList]) -> list[L.LNode]: """Get statements from a statement list. Args: statement: Statement list. Returns: List of statements. """ if isinstance(statement, L.StatementList): return [statement.expr for statement in statement.statements] else: return [statement.expr] def check_dependency(statement: L.Statement, index: L.Symbol) -> bool: """Check if a statement depends on a given index. Args: statement: Statement to check. index: Index to check. Returns: True if statement depends on index, False otherwise. """ if isinstance(statement, L.ArrayAccess): if index in statement.indices: return True else: for i in statement.indices: if isinstance(i, L.Sum) or isinstance(i, L.Product): if index in i.args: return True elif isinstance(statement, L.Symbol): return False elif isinstance(statement, L.LiteralFloat) or isinstance(statement, L.LiteralInt): return False else: raise NotImplementedError(f"Statement {statement} not supported.") return False def licm(section: L.Section, quadrature_rule: QuadratureRule) -> L.Section: """Perform loop invariant code motion. Args: section: List of LNodes to optimize. quadrature_rule: TODO. Returns: Optimized list of LNodes. 
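When at least two factors of an inner-loop product do not depend on the innermost loop index, their product is precomputed into a temporary array in a separate loop over the outer index.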
""" assert L.Annotation.licm in section.annotations counter = 0 # Check depth of loops depth = L.depth(section.statements[0]) if depth != 2: return section # Get statements in the inner loop outer_loop = section.statements[0] inner_loop = outer_loop.body.statements[0] # Collect all expressions in the inner loop by corresponding RHS expressions = defaultdict(list) for body in inner_loop.body.statements: statements = get_statements(body) assert isinstance(statements, list) for statement in statements: assert isinstance(statement, L.AssignAdd) # Expecting AssignAdd rhs = statement.rhs assert isinstance(rhs, L.Product) # Expecting Sum lhs = statement.lhs assert isinstance(lhs, L.ArrayAccess) # Expecting ArrayAccess expressions[lhs].append(rhs) pre_loop: list[L.LNode] = [] for lhs, rhs in expressions.items(): for r in rhs: hoist_candidates = [] for arg in r.args: dependency = check_dependency(arg, inner_loop.index) if not dependency: hoist_candidates.append(arg) if len(hoist_candidates) > 1: # create new temp name = f"temp_{counter}" counter += 1 temp = L.Symbol(name, L.DataType.SCALAR) for h in hoist_candidates: r.args.remove(h) # update expression with new temp r.args.append(L.ArrayAccess(temp, [outer_loop.index])) # create code for hoisted term size = outer_loop.end.value - outer_loop.begin.value pre_loop.append(L.ArrayDecl(temp, size, [0])) body = L.Assign( L.ArrayAccess(temp, [outer_loop.index]), L.Product(hoist_candidates) ) pre_loop.append( L.ForRange(outer_loop.index, outer_loop.begin, outer_loop.end, [body]) ) section.statements = pre_loop + section.statements return section ffcx-0.9.0/ffcx/codegeneration/symbols.py000066400000000000000000000166051470142666300204450ustar00rootroot00000000000000# Copyright (C) 2011-2023 Martin Sandve Alnæs, Igor A. Baratta # # This file is part of FFCx. (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """FFCx/UFC specific symbol naming.""" import logging import ufl import ffcx.codegeneration.lnodes as L logger = logging.getLogger("ffcx") def ufcx_restriction_postfix(restriction): """Get restriction postfix.""" # TODO: Get restriction postfix from somewhere central if restriction == "+": res = "_0" elif restriction == "-": res = "_1" else: res = "" return res def format_mt_name(basename, mt): """Format variable name for modified terminal.""" access = str(basename) # Add averaged state to name if mt.averaged is not None: avg = f"_a{mt.averaged}" access += avg # Format restriction res = ufcx_restriction_postfix(mt.restriction).replace("_", "_r") access += res # Format global derivatives if mt.global_derivatives: assert basename == "J" der = f"_deriv_{''.join(map(str, mt.global_derivatives))}" access += der # Format local derivatives if mt.local_derivatives: # Convert "listing" derivative multindex into "counting" representation gdim = ufl.domain.extract_unique_domain(mt.terminal).geometric_dimension() ld_counting = tuple(mt.local_derivatives.count(i) for i in range(gdim)) der = f"_d{''.join(map(str, ld_counting))}" access += der # Add flattened component to name if mt.component: comp = f"_c{mt.flat_component}" access += comp return access class FFCXBackendSymbols: """FFCx specific symbol definitions. 
Provides non-ufl symbols.""" def __init__(self, coefficient_numbering, coefficient_offsets, original_constant_offsets): """Initialise.""" self.coefficient_numbering = coefficient_numbering self.coefficient_offsets = coefficient_offsets self.original_constant_offsets = original_constant_offsets # Keep tabs on tables, so the symbols can be reused self.quadrature_weight_tables = {} self.element_tables = {} # Reusing a single symbol for all quadrature loops, assumed not to be nested. self.quadrature_loop_index = L.Symbol("iq", dtype=L.DataType.INT) # Symbols for the tabulate_tensor function arguments self.element_tensor = L.Symbol("A", dtype=L.DataType.SCALAR) self.coefficients = L.Symbol("w", dtype=L.DataType.SCALAR) self.constants = L.Symbol("c", dtype=L.DataType.SCALAR) self.coordinate_dofs = L.Symbol("coordinate_dofs", dtype=L.DataType.REAL) self.entity_local_index = L.Symbol("entity_local_index", dtype=L.DataType.INT) self.quadrature_permutation = L.Symbol("quadrature_permutation", dtype=L.DataType.INT) # Index for loops over coefficient dofs, assumed to never be used in two nested loops. self.coefficient_dof_sum_index = L.Symbol("ic", dtype=L.DataType.INT) # Table for chunk of custom quadrature weights (including cell measure scaling). self.custom_weights_table = L.Symbol("weights_chunk", dtype=L.DataType.REAL) # Table for chunk of custom quadrature points (physical coordinates). self.custom_points_table = L.Symbol("points_chunk", dtype=L.DataType.REAL) def entity(self, entity_type, restriction): """Entity index for lookup in element tables.""" if entity_type == "cell": # Always 0 for cells (even with restriction) return L.LiteralInt(0) if entity_type == "facet": if restriction == "-": return self.entity_local_index[1] else: return self.entity_local_index[0] elif entity_type == "vertex": return self.entity_local_index[0] else: logging.exception(f"Unknown entity_type {entity_type}") def argument_loop_index(self, iarg): """Loop index for argument iarg.""" indices = ["i", "j", "k", "l"] return L.Symbol(indices[iarg], dtype=L.DataType.INT) def weights_table(self, quadrature_rule): """Table of quadrature weights.""" key = f"weights_{quadrature_rule.id()}" if key not in self.quadrature_weight_tables: self.quadrature_weight_tables[key] = L.Symbol( f"weights_{quadrature_rule.id()}", dtype=L.DataType.REAL ) return self.quadrature_weight_tables[key] def points_table(self, quadrature_rule): """Table of quadrature points (points on the reference integration entity).""" return L.Symbol(f"points_{quadrature_rule.id()}", dtype=L.DataType.REAL) def x_component(self, mt): """Physical coordinate component.""" return L.Symbol(format_mt_name("x", mt), dtype=L.DataType.REAL) def J_component(self, mt): """Jacobian component.""" # FIXME: Add domain number! return L.Symbol(format_mt_name("J", mt), dtype=L.DataType.REAL) def domain_dof_access(self, dof, component, gdim, num_scalar_dofs, restriction): """Domain DOF access.""" # FIXME: Add domain number or offset! 
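# Coordinate dofs are laid out as [num_dofs][3] per cell; for the "-" restriction on an interior facet the second cell's block follows the first, hence the num_scalar_dofs * 3 offset below.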
offset = 0 if restriction == "-": offset = num_scalar_dofs * 3 return self.coordinate_dofs[3 * dof + component + offset] def coefficient_dof_access(self, coefficient, dof_index): """Coefficient DOF access.""" offset = self.coefficient_offsets[coefficient] w = self.coefficients return w[offset + dof_index] def coefficient_dof_access_blocked( self, coefficient: ufl.Coefficient, index, block_size, dof_offset ): """Blocked coefficient DOF access.""" coeff_offset = self.coefficient_offsets[coefficient] w = self.coefficients _w = L.Symbol(f"_w_{coeff_offset}_{dof_offset}", dtype=L.DataType.SCALAR) unit_stride_access = _w[index] original_access = w[coeff_offset + index * block_size + dof_offset] return unit_stride_access, original_access def coefficient_value(self, mt): """Symbol for variable holding value or derivative component of coefficient.""" c = self.coefficient_numbering[mt.terminal] return L.Symbol(format_mt_name("w%d" % (c,), mt), dtype=L.DataType.SCALAR) def constant_index_access(self, constant, index): """Constant index access.""" offset = self.original_constant_offsets[constant] c = self.constants return c[offset + index] # TODO: Remove this, use table_access instead def element_table(self, tabledata, entity_type, restriction): """Get an element table.""" entity = self.entity(entity_type, restriction) if tabledata.is_uniform: entity = 0 else: entity = self.entity(entity_type, restriction) if tabledata.is_piecewise: iq = 0 else: iq = self.quadrature_loop_index if tabledata.is_permuted: qp = self.quadrature_permutation[0] if restriction == "-": qp = self.quadrature_permutation[1] else: qp = 0 # Return direct access to element table, reusing symbol if possible if tabledata.name not in self.element_tables: self.element_tables[tabledata.name] = L.Symbol(tabledata.name, dtype=L.DataType.REAL) return self.element_tables[tabledata.name][qp][entity][iq] ffcx-0.9.0/ffcx/codegeneration/ufcx.h000066400000000000000000000203211470142666300175070ustar00rootroot00000000000000/// This is UFCx /// This software is released under the terms of the unlicense (see the file /// UNLICENSE). /// /// The FEniCS Project (http://www.fenicsproject.org/) 2006-2021. /// /// UFCx defines the interface between code generated by FFCx and the /// DOLFINx C++ library. Changes here must be reflected both in the FFCx /// code generation and in the DOLFINx library calls. #pragma once #define UFCX_VERSION_MAJOR 0 #define UFCX_VERSION_MINOR 9 #define UFCX_VERSION_MAINTENANCE 0 #define UFCX_VERSION_RELEASE 1 #if UFCX_VERSION_RELEASE #define UFCX_VERSION \ UFCX_VERSION_MAJOR "." UFCX_VERSION_MINOR "." UFCX_VERSION_MAINTENANCE #else #define UFCX_VERSION \ UFCX_VERSION_MAJOR "." UFCX_VERSION_MINOR "." UFCX_VERSION_MAINTENANCE ".dev0" #endif #include #include #ifdef __cplusplus extern "C" { #if defined(__clang__) #define restrict __restrict #elif defined(__GNUG__) #define restrict __restrict__ #elif defined(_MSC_VER) #define restrict __restrict #define __STDC_NO_COMPLEX__ #else #define restrict #endif // restrict #endif // __cplusplus // typedef enum { cell = 0, exterior_facet = 1, interior_facet = 2 } ufcx_integral_type; // /// Tabulate integral into tensor A with compiled quadrature rule /// /// @param[out] A /// @param[in] w Coefficients attached to the form to which the /// tabulated integral belongs. /// /// Dimensions: w[coefficient][restriction][dof]. /// /// Restriction dimension /// applies to interior facet integrals, where coefficients restricted /// to both cells sharing the facet must be provided. 
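///
/// For cell and exterior facet integrals the restriction dimension is
/// absent, so the layout reduces to w[coefficient][dof].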
/// @param[in] c Constants attached to the form to which the tabulated /// integral belongs. Dimensions: c[constant][dim]. /// @param[in] coordinate_dofs Values of degrees of freedom of /// coordinate element. Defines the geometry of the cell. Dimensions: /// coordinate_dofs[restriction][num_dofs][3]. Restriction /// dimension applies to interior facet integrals, where cell /// geometries for both cells sharing the facet must be provided. /// @param[in] entity_local_index Local index of mesh entity on which /// to tabulate. This applies to facet integrals. /// @param[in] quadrature_permutation For facet integrals, numbers to /// indicate the permutation to be applied to each side of the facet /// to make the orientations of the faces matched up should be passed /// in. If an integer of value N is passed in, then: /// /// - floor(N / 2) gives the number of rotations to apply to the /// facet /// - N % 2 gives the number of reflections to apply to the facet /// /// For integrals not on interior facets, this argument has no effect and a /// null pointer can be passed. For interior facets the array will have size 2 /// (one permutation for each cell adjacent to the facet). typedef void(ufcx_tabulate_tensor_float32)( float* restrict A, const float* restrict w, const float* restrict c, const float* restrict coordinate_dofs, const int* restrict entity_local_index, const uint8_t* restrict quadrature_permutation); /// Tabulate integral into tensor A with compiled /// quadrature rule and double precision /// /// @see ufcx_tabulate_tensor_single typedef void(ufcx_tabulate_tensor_float64)( double* restrict A, const double* restrict w, const double* restrict c, const double* restrict coordinate_dofs, const int* restrict entity_local_index, const uint8_t* restrict quadrature_permutation); #ifndef __STDC_NO_COMPLEX__ /// Tabulate integral into tensor A with compiled /// quadrature rule and complex single precision /// /// @see ufcx_tabulate_tensor_single typedef void(ufcx_tabulate_tensor_complex64)( float _Complex* restrict A, const float _Complex* restrict w, const float _Complex* restrict c, const float* restrict coordinate_dofs, const int* restrict entity_local_index, const uint8_t* restrict quadrature_permutation); #endif // __STDC_NO_COMPLEX__ #ifndef __STDC_NO_COMPLEX__ /// Tabulate integral into tensor A with compiled /// quadrature rule and complex double precision /// /// @see ufcx_tabulate_tensor_single typedef void(ufcx_tabulate_tensor_complex128)( double _Complex* restrict A, const double _Complex* restrict w, const double _Complex* restrict c, const double* restrict coordinate_dofs, const int* restrict entity_local_index, const uint8_t* restrict quadrature_permutation); #endif // __STDC_NO_COMPLEX__ typedef struct ufcx_integral { const bool* enabled_coefficients; ufcx_tabulate_tensor_float32* tabulate_tensor_float32; ufcx_tabulate_tensor_float64* tabulate_tensor_float64; #ifndef __STDC_NO_COMPLEX__ ufcx_tabulate_tensor_complex64* tabulate_tensor_complex64; ufcx_tabulate_tensor_complex128* tabulate_tensor_complex128; #endif // __STDC_NO_COMPLEX__ bool needs_facet_permutations; /// Get the hash of the coordinate element associated with the geometry of the mesh. 
uint64_t coordinate_element_hash; } ufcx_integral; typedef struct ufcx_expression { /// Evaluate expression into tensor A with compiled evaluation points /// /// @param[out] A /// Dimensions: A[num_points][num_components][num_argument_dofs] /// /// @see ufcx_tabulate_tensor /// ufcx_tabulate_tensor_float32* tabulate_tensor_float32; ufcx_tabulate_tensor_float64* tabulate_tensor_float64; #ifndef __STDC_NO_COMPLEX__ ufcx_tabulate_tensor_complex64* tabulate_tensor_complex64; ufcx_tabulate_tensor_complex128* tabulate_tensor_complex128; #endif // __STDC_NO_COMPLEX__ /// Number of coefficients int num_coefficients; /// Number of constants int num_constants; /// Original coefficient position for each coefficient const int* original_coefficient_positions; /// List of names of coefficients const char** coefficient_names; /// List of names of constants const char** constant_names; /// Number of evaluation points int num_points; /// Dimension of evaluation point int entity_dimension; /// Coordinates of evaluations points. Dimensions: /// points[num_points][entity_dimension] const double* points; /// Shape of expression. Dimension: value_shape[num_components] const int* value_shape; /// Number of components of return_shape int num_components; /// Rank, i.e. number of arguments int rank; } ufcx_expression; /// This class defines the interface for the assembly of the global /// tensor corresponding to a form with r + n arguments, that is, a /// mapping /// /// a : V1 x V2 x ... Vr x W1 x W2 x ... x Wn -> R /// /// with arguments v1, v2, ..., vr, w1, w2, ..., wn. The rank r /// global tensor A is defined by /// /// A = a(V1, V2, ..., Vr, w1, w2, ..., wn), /// /// where each argument Vj represents the application to the /// sequence of basis functions of Vj and w1, w2, ..., wn are given /// fixed functions (coefficients). typedef struct ufcx_form { /// String identifying the form const char* signature; /// Rank of the global tensor (r) int rank; /// Number of coefficients (n) int num_coefficients; /// Number of constants int num_constants; /// Original coefficient position for each coefficient int* original_coefficient_positions; /// List of names of coefficients const char** coefficient_name_map; /// List of names of constants const char** constant_name_map; /// Get the hash of the finite element for the i-th argument function, where 0 <= /// i < r + n. /// /// @param i Argument number if 0 <= i < r Coefficient number j = i /// - r if r + j <= i < r + n uint64_t* finite_element_hashes; /// List of cell, interior facet and exterior facet integrals ufcx_integral** form_integrals; /// IDs for each integral in form_integrals list int* form_integral_ids; /// Offsets for cell, interior facet and exterior facet integrals in form_integrals list int* form_integral_offsets; } ufcx_form; #ifdef __cplusplus #undef restrict #undef __STDC_NO_COMPLEX__ } #endif ffcx-0.9.0/ffcx/codegeneration/utils.py000066400000000000000000000047101470142666300201070ustar00rootroot00000000000000# Copyright (C) 2020-2024 Michal Habera, Chris Richardson and Garth N. Wells # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Utilities.""" import typing import numpy as np import numpy.typing as npt def dtype_to_c_type(dtype: typing.Union[npt.DTypeLike, str]) -> str: """For a NumPy dtype, return the corresponding C type. Args: dtype: Numpy data type, Returns: Corresponding C type. """ # Note: Possible aliases, e.g. 
numpy.longdouble, should test against char ID if np.dtype(dtype).char == "g": return "long double" if np.dtype(dtype) == np.intc: return "int" elif np.dtype(dtype).char == "f": return "float" elif np.dtype(dtype).char == "d": return "double" elif np.dtype(dtype) == np.complex64: return "float _Complex" elif np.dtype(dtype) == np.complex128: return "double _Complex" else: raise RuntimeError(f"Unknown NumPy type for: {dtype}") def dtype_to_scalar_dtype(dtype: typing.Union[npt.DTypeLike, str]) -> np.dtype: """For a NumPy dtype, return the corresponding real dtype. Args: dtype: Numpy data type Returns: ``numpy.dtype`` for the real component of ``dtype``. """ if np.issubdtype(dtype, np.floating): return np.dtype(dtype) elif np.issubdtype(dtype, np.complexfloating): return np.dtype(dtype).type(0).real.dtype elif np.issubdtype(dtype, np.integer): return np.dtype(dtype) else: raise RuntimeError(f"Cannot get value dtype for '{dtype}'. ") def numba_ufcx_kernel_signature(dtype: npt.DTypeLike, xdtype: npt.DTypeLike): """Return a Numba C signature for the UFCx ``tabulate_tensor`` interface. Args: dtype: The scalar type for the finite element data. xdtype: The geometry float type. Returns: A Numba signature (``numba.core.typing.templates.Signature``). Raises: ImportError: If ``numba`` cannot be imported. """ try: import numba.types as types from numba import from_dtype return types.void( types.CPointer(from_dtype(dtype)), types.CPointer(from_dtype(dtype)), types.CPointer(from_dtype(dtype)), types.CPointer(from_dtype(xdtype)), types.CPointer(types.intc), types.CPointer(types.uint8), ) except ImportError as e: raise e ffcx-0.9.0/ffcx/compiler.py000066400000000000000000000072221470142666300155740ustar00rootroot00000000000000# Copyright (C) 2007-2020 Anders Logg and Michal Habera # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Main interface for compilation of forms. Breaks the compilation into several sequential stages. The output of each stage is the input of the next stage. Compiler stages --------------- 0. Language, parsing - Input: Python code or .ufl file - Output: UFL form This stage consists of parsing and expressing a form in the UFL form language. This stage is handled by UFL. 1. Analysis - Input: UFL form - Output: Preprocessed UFL form and FormData (metadata) This stage preprocesses the UFL form and extracts form metadata. It may also perform simplifications on the form. 2. Code representation - Input: Preprocessed UFL form and FormData (metadata) - Output: Intermediate Representation (IR) This stage examines the input and generates all data needed for code generation. This includes generation of finite element basis functions, extraction of data for mapping of degrees of freedom and possible precomputation of integrals. Most of the complexity of compilation is handled in this stage. The IR is stored as a dictionary, mapping names of UFC functions to data needed for generation of the corresponding code. 3. Code generation - Input: Intermediate Representation (IR) - Output: C code This stage examines the IR and generates the actual C code for the body of each UFC function. The code is stored as a dictionary, mapping names of UFC functions to strings containing the C code of the body of each function. 4. Code formatting - Input: C code - Output: C code files This stage examines the generated C++ code and formats it according to the UFC format, generating as output one or more .h/.c files conforming to the UFC format. 
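
A minimal usage sketch, driving all stages through the public entry point
(``form`` is a placeholder UFL form and the options shown are not a
complete list; "scalar_type" is the option consumed by the analysis stage):

    from ffcx.compiler import compile_ufl_objects

    code_h, code_c = compile_ufl_objects([form], {"scalar_type": "float64"})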
""" from __future__ import annotations import logging import typing from time import time import numpy.typing as npt from ffcx.analysis import analyze_ufl_objects from ffcx.codegeneration.codegeneration import generate_code from ffcx.formatting import format_code from ffcx.ir.representation import compute_ir logger = logging.getLogger("ffcx") def _print_timing(stage: int, timing: float): logger.info(f"Compiler stage {stage} finished in {timing:.4f} seconds.") def compile_ufl_objects( ufl_objects: list[typing.Any], options: dict[str, int | float | npt.DTypeLike], object_names: dict[int, str] | None = None, prefix: str | None = None, visualise: bool = False, ) -> tuple[str, str]: """Generate UFC code for a given UFL objects. Args: ufl_objects: Objects to be compiled. Accepts elements, forms, integrals or coordinate mappings. object_names: Map from object Python id to object name prefix: Prefix options: Options visualise: Toggle visualisation """ _object_names = object_names if object_names is not None else {} _prefix = prefix if prefix is not None else "" # Stage 1: analysis cpu_time = time() analysis = analyze_ufl_objects(ufl_objects, options["scalar_type"]) # type: ignore _print_timing(1, time() - cpu_time) # Stage 2: intermediate representation cpu_time = time() ir = compute_ir(analysis, _object_names, _prefix, options, visualise) _print_timing(2, time() - cpu_time) # Stage 3: code generation cpu_time = time() code = generate_code(ir, options) _print_timing(3, time() - cpu_time) # Stage 4: format code cpu_time = time() code_h, code_c = format_code(code) _print_timing(4, time() - cpu_time) return code_h, code_c ffcx-0.9.0/ffcx/element_interface.py000066400000000000000000000034541470142666300174360ustar00rootroot00000000000000# Copyright (C) 2021 Matthew W. Scroggs and Chris Richardson # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Finite element interface.""" import basix import basix.ufl import numpy as np import numpy.typing as npt from basix import CellType as _CellType def basix_index(indices: tuple[int]) -> int: """Get the Basix index of a derivative.""" return basix.index(*indices) def create_quadrature( cellname: str, degree: int, rule: str, elements: list[basix.ufl._ElementBase] ) -> tuple[npt.ArrayLike, npt.ArrayLike]: """Create a quadrature rule.""" if cellname == "vertex": return (np.ones((1, 0), dtype=np.float64), np.ones(1, dtype=np.float64)) else: celltype = _CellType[cellname] polyset_type = basix.PolysetType.standard for e in elements: polyset_type = basix.polyset_superset(celltype, polyset_type, e.polyset_type) return basix.make_quadrature( celltype, degree, rule=basix.quadrature.string_to_type(rule), polyset_type=polyset_type ) def reference_cell_vertices(cellname: str) -> npt.NDArray[np.float64]: """Get the vertices of a reference cell.""" return np.asarray(basix.geometry(_CellType[cellname])) def map_facet_points( points: npt.NDArray[np.float64], facet: int, cellname: str ) -> npt.NDArray[np.float64]: """Map points from a reference facet to a physical facet.""" geom = np.asarray(basix.geometry(_CellType[cellname])) facet_vertices = [geom[i] for i in basix.topology(_CellType[cellname])[-2][facet]] return np.asarray( [ facet_vertices[0] + sum((i - facet_vertices[0]) * j for i, j in zip(facet_vertices[1:], p)) for p in points ], dtype=np.float64, ) ffcx-0.9.0/ffcx/formatting.py000066400000000000000000000026471470142666300161420ustar00rootroot00000000000000# Copyright (C) 2009-2018 Anders Logg and Garth N. 
Wells # # This file is part of FFCx. (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Compiler stage 5: Code formatting. This module implements the formatting of UFC code from a given dictionary of generated C++ code for the body of each UFC function. It relies on templates for UFC code available as part of the module ufcx_utils. """ from __future__ import annotations import logging import os from ffcx.codegeneration.codegeneration import CodeBlocks logger = logging.getLogger("ffcx") def format_code(code: CodeBlocks) -> tuple[str, str]: """Format given code in UFC format. Returns two strings with header and source file contents.""" logger.info(79 * "*") logger.info("Compiler stage 5: Formatting code") logger.info(79 * "*") code_c = "" code_h = "" for parts_code in code: code_h += "".join([c[0] for c in parts_code]) code_c += "".join([c[1] for c in parts_code]) return code_h, code_c def write_code(code_h, code_c, prefix, output_dir): """Write code to files.""" _write_file(code_h, prefix, ".h", output_dir) _write_file(code_c, prefix, ".c", output_dir) def _write_file(output, prefix, postfix, output_dir): """Write generated code to file.""" filename = os.path.join(output_dir, prefix + postfix) with open(filename, "w") as hfile: hfile.write(output) ffcx-0.9.0/ffcx/git_commit_hash.py.in000066400000000000000000000004341470142666300175230ustar00rootroot00000000000000# Copyright (C) 2016 Jan Blechta # # This file is part of FFCx. (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later def git_commit_hash(): """Return git changeset hash (returns "unknown" if changeset is not known).""" return "@GIT_COMMIT_HASH" ffcx-0.9.0/ffcx/ir/000077500000000000000000000000001470142666300140175ustar00rootroot00000000000000ffcx-0.9.0/ffcx/ir/__init__.py000066400000000000000000000000431470142666300161250ustar00rootroot00000000000000"""Intermediate representation.""" ffcx-0.9.0/ffcx/ir/analysis/000077500000000000000000000000001470142666300156425ustar00rootroot00000000000000ffcx-0.9.0/ffcx/ir/analysis/__init__.py000066400000000000000000000003411470142666300177510ustar00rootroot00000000000000# Copyright (C) 2011-2017 Martin Sandve Alnæs # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Algorithms for the analysis phase of the form compilation.""" ffcx-0.9.0/ffcx/ir/analysis/factorization.py000066400000000000000000000266371470142666300211060ustar00rootroot00000000000000# Copyright (C) 2011-2017 Martin Sandve Alnæs # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Algorithms for factorizing argument dependent monomials.""" import logging from functools import singledispatch from ufl import as_ufl, conditional from ufl.classes import Argument, Conditional, Conj, Division, Product, Sum, Zero from ffcx.ir.analysis.graph import ExpressionGraph from ffcx.ir.analysis.modified_terminals import analyse_modified_terminal, strip_modified_terminal logger = logging.getLogger("ffcx") def build_argument_indices(S): """Build ordered list of indices to modified arguments.""" arg_indices = [] for i, v in S.nodes.items(): arg = strip_modified_terminal(v["expression"]) if isinstance(arg, Argument): arg_indices.append(i) # Make a canonical ordering of vertex indices for modified arguments def arg_ordering_key(i): """Return a key for sorting argument vertex indices. Key is based on the properties of the modified terminal. 
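
        The key delegates to ModifiedTerminal.argument_ordering_key, making
        the ordering deterministic across runs.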
""" mt = analyse_modified_terminal(S.nodes[i]["expression"]) return mt.argument_ordering_key() ordered_arg_indices = sorted(arg_indices, key=arg_ordering_key) return ordered_arg_indices def graph_insert(F, expr): """Add new expression expr to factorisation graph or return existing index.""" fi = F.e2i.get(expr) if fi is None: fi = F.number_of_nodes() F.add_node(fi, expression=expr) F.e2i[expr] = fi return fi # Reuse these empty objects where appropriate to save memory noargs = {} # type: ignore @singledispatch def handler(v, fac, sf, F): """Handler.""" # Error checking if any(fac): raise RuntimeError( f"Assuming that a {type(v)} cannot be applied to arguments. " "If this is wrong please report a bug." ) # Record non-argument subexpression raise RuntimeError("No arguments") @handler.register(Sum) def handle_sum(v, fac, sf, F): """Handle a sum.""" if len(fac) != 2: raise RuntimeError("Assuming binary sum here. This can be fixed if needed.") fac0 = fac[0] fac1 = fac[1] argkeys = set(fac0) | set(fac1) if argkeys: # f*arg + g*arg = (f+g)*arg argkeys = sorted(argkeys) keylen = len(argkeys[0]) factors = {} for argkey in argkeys: if len(argkey) != keylen: raise RuntimeError("Expecting equal argument rank terms among summands.") fi0 = fac0.get(argkey) fi1 = fac1.get(argkey) if fi0 is None: fisum = fi1 elif fi1 is None: fisum = fi0 else: f0 = F.nodes[fi0]["expression"] f1 = F.nodes[fi1]["expression"] fisum = graph_insert(F, f0 + f1) factors[argkey] = fisum else: # non-arg + non-arg raise RuntimeError("No arguments") return factors @handler.register(Product) def handle_product(v, fac, sf, F): """Handle a product.""" if len(fac) != 2: raise RuntimeError("Assuming binary product here. This can be fixed if needed.") fac0 = fac[0] fac1 = fac[1] if not fac0 and not fac1: # non-arg * non-arg raise RuntimeError("No arguments") elif not fac0: # non-arg * arg # Record products of non-arg operand with each factor of arg-dependent operand f0 = sf[0] factors = {} for k1 in sorted(fac1): f1 = F.nodes[fac1[k1]]["expression"] factors[k1] = graph_insert(F, f0 * f1) elif not fac1: # arg * non-arg # Record products of non-arg operand with each factor of arg-dependent operand f1 = sf[1] factors = {} for k0 in sorted(fac0): f0 = F.nodes[fac0[k0]]["expression"] factors[k0] = graph_insert(F, f1 * f0) else: # arg * arg # Record products of each factor of arg-dependent operand factors = {} for k0 in sorted(fac0): f0 = F.nodes[fac0[k0]]["expression"] for k1 in sorted(fac1): f1 = F.nodes[fac1[k1]]["expression"] argkey = tuple(sorted(k0 + k1)) # sort key for canonical representation factors[argkey] = graph_insert(F, f0 * f1) return factors @handler.register(Conj) def handle_conj(v, fac, sf, F): """Handle a conjugation.""" fac = fac[0] if fac: factors = {} for k in fac: f0 = F.nodes[fac[k]]["expression"] factors[k] = graph_insert(F, Conj(f0)) else: raise RuntimeError("No arguments") return factors @handler.register(Division) def handle_division(v, fac, sf, F): """Handle a division.""" fac0 = fac[0] fac1 = fac[1] assert not fac1, "Cannot divide by arguments." 
if fac0: # arg / non-arg # Record products of non-arg operand with each factor of arg-dependent operand f1 = sf[1] factors = {} for k0 in sorted(fac0): f0 = F.nodes[fac0[k0]]["expression"] factors[k0] = graph_insert(F, f0 / f1) else: # non-arg / non-arg raise RuntimeError("No arguments") return factors @handler.register(Conditional) def handle_conditional(v, fac, sf, F): """Handle a conditional.""" fac0 = fac[0] fac1 = fac[1] fac2 = fac[2] assert not fac0, "Cannot have argument in condition." if not (fac1 or fac2): # non-arg ? non-arg : non-arg raise RuntimeError("No arguments") else: f0 = sf[0] f1 = sf[1] f2 = sf[2] # Term conditional(c, argument, non-argument) is not legal unless non-argument is 0.0 assert fac1 or isinstance(f1, Zero) assert fac2 or isinstance(f2, Zero) assert () not in fac1 assert () not in fac2 z = as_ufl(0.0) # In general, can decompose like this: # conditional(c, sum_i fi*ui, sum_j fj*uj) -> sum_i conditional(c, fi, 0)*ui # + sum_j conditional(c, 0, fj)*uj mas = sorted(set(fac1.keys()) | set(fac2.keys())) factors = {} for k in mas: fi1 = fac1.get(k) fi2 = fac2.get(k) f1 = z if fi1 is None else F.nodes[fi1]["expression"] f2 = z if fi2 is None else F.nodes[fi2]["expression"] factors[k] = graph_insert(F, conditional(f0, f1, f2)) return factors def compute_argument_factorization(S, rank): """Factorize a scalar expression graph w.r.t. scalar Argument components. Returns: a triplet (AV, FV, IM), where: - The scalar argument component subgraph: AV[ai] = v with the property SV[arg_indices] == AV[:] - An expression graph vertex list with all non-argument factors: FV[fi] = f with the property that none of the expressions depend on Arguments. - A dict representation of the final integrand of rank r: IM = { (ai1_1, ..., ai1_r): fi1, (ai2_1, ..., ai2_r): fi2, } This mapping represents the factorization of SV[-1] w.r.t. Arguments s.t.: SV[-1] := sum(FV[fik] * product(AV[ai] for ai in aik) for aik, fik in IM.items()) where := means equivalence in the mathematical sense, of course in a different technical representation. """ # Extract argument component subgraph arg_indices = build_argument_indices(S) AV = [S.nodes[i]["expression"] for i in arg_indices] # Data structure for building non-argument factors F = ExpressionGraph() # Attach a quick lookup dict for expression to index F.e2i = {} # Insert arguments as first entries in factorisation graph # They will not be connected to other nodes, but will be available # and referred to by the factorisation indices of the 'target' nodes. for v in AV: graph_insert(F, v) # Adding 1.0 as an expression allows avoiding special representation # of arguments when first visited by representing "v" as "1*v" one_index = graph_insert(F, as_ufl(1.0)) # Intermediate factorization for each vertex in SV on the format # SV_factors[si] = None # if SV[si] does not depend on arguments # SV_factors[si] = { argkey: fi } # if SV[si] does depend on arguments, where: # FV[fi] is the expression SV[si] with arguments factored out # argkey is a tuple with indices into SV for each of the argument components SV[si] depends on # SV_factors[si] = { argkey1: fi1, argkey2: fi2, ... } # if SV[si] # is a linear combination of multiple argkey configurations # Factorize each subexpression in order: for si, attr in S.nodes.items(): deps = S.out_edges[si] v = attr["expression"] if si in arg_indices: assert len(deps) == 0 # v is a modified Argument factors = {(si,): one_index} else: fac = [S.nodes[d]["factors"] for d in deps] if not any(fac): # Entirely scalar (i.e. 
no arg factors) # Just add unchanged to F graph_insert(F, v) factors = noargs else: # Get scalar factors for dependencies # which do not have arg factors sf = [] for i, d in enumerate(deps): if fac[i]: sf.append(None) else: sf.append(S.nodes[d]["expression"]) # Use appropriate handler to deal with Sum, Product, etc. factors = handler(v, fac, sf, F) attr["factors"] = factors assert len(F.nodes) == len(F.e2i) # Prepare a mapping from component of expression to factors factors = {} S_targets = [i for i, v in S.nodes.items() if v.get("target", False)] for S_target in S_targets: # Get the factorizations of the target values if S.nodes[S_target]["factors"] == {}: if rank == 0: # Functionals and expressions: store as no args * factor for comp in S.nodes[S_target]["component"]: factors[comp] = {(): F.e2i[S.nodes[S_target]["expression"]]} else: # Zero form of arity 1 or higher: make factors empty pass else: # Forms of arity 1 or higher: # Map argkeys from indices into SV to indices into AV, # and resort keys for canonical representation for argkey, fi in S.nodes[S_target]["factors"].items(): ai_fi = {tuple(sorted(arg_indices.index(si) for si in argkey)): fi} for comp in S.nodes[S_target]["component"]: if factors.get(comp): factors[comp].update(ai_fi) else: factors[comp] = ai_fi # Indices into F that are needed for final result for comp, target in factors.items(): for argkey, fi in target.items(): F.nodes[fi]["target"] = F.nodes[fi].get("target", []) F.nodes[fi]["target"].append(argkey) F.nodes[fi]["component"] = F.nodes[fi].get("component", []) F.nodes[fi]["component"].append(comp) # Compute dependencies in FV for i, v in F.nodes.items(): expr = v["expression"] if not expr._ufl_is_terminal_ and not expr._ufl_is_terminal_modifier_: for o in expr.ufl_operands: F.add_edge(i, F.e2i[o]) return F ffcx-0.9.0/ffcx/ir/analysis/graph.py000066400000000000000000000203021470142666300173120ustar00rootroot00000000000000# Copyright (C) 2011-2017 Martin Sandve Alnæs # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Linearized data structure for the computational graph.""" import logging import numpy as np import ufl from ffcx.ir.analysis.modified_terminals import is_modified_terminal from ffcx.ir.analysis.reconstruct import reconstruct from ffcx.ir.analysis.valuenumbering import ValueNumberer logger = logging.getLogger("ffcx") class ExpressionGraph: """A directed multi-edge graph. ExpressionGraph allows multiple edges between the same nodes, and respects the insertion order of nodes and edges. 
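
    A minimal usage sketch (node keys and payloads are arbitrary; expr_a
    and expr_b below stand for any expressions):

        G = ExpressionGraph()
        G.add_node(0, expression=expr_a)
        G.add_node(1, expression=expr_b)
        G.add_edge(0, 1)  # edge from node 0 to node 1
        assert G.out_edges[0] == [1] and G.in_edges[1] == [0]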
""" def __init__(self): """Initialise.""" # Data structures for directed multi-edge graph self.nodes = {} self.out_edges = {} self.in_edges = {} def number_of_nodes(self): """Get number of nodes.""" return len(self.nodes) def add_node(self, key, **kwargs): """Add a node with optional properties.""" self.nodes[key] = kwargs self.out_edges[key] = [] self.in_edges[key] = [] def add_edge(self, node1, node2): """Add a directed edge from node1 to node2.""" if node1 not in self.nodes or node2 not in self.nodes: raise KeyError("Adding edge to unknown node") self.out_edges[node1] += [node2] self.in_edges[node2] += [node1] def build_graph_vertices(expressions, skip_terminal_modifiers=False): """Build graph vertices.""" # Count unique expression nodes G = ExpressionGraph() G.e2i = _count_nodes_with_unique_post_traversal(expressions, skip_terminal_modifiers) # Invert the map to get index->expression GV = sorted(G.e2i, key=G.e2i.get) # Add nodes to 'new' graph structure for i, v in enumerate(GV): G.add_node(i, expression=v) for comp, expr in enumerate(expressions): # Get vertex index representing input expression root V_target = G.e2i[expr] G.nodes[V_target]["target"] = True G.nodes[V_target]["component"] = G.nodes[V_target].get("component", []) G.nodes[V_target]["component"].append(comp) return G def build_scalar_graph(expression): """Build list representation of expression graph covering the given expressions.""" # Populate with vertices G = build_graph_vertices([expression], skip_terminal_modifiers=False) # Build more fine grained computational graph of scalar subexpressions scalar_expressions = rebuild_with_scalar_subexpressions(G) # Build new list representation of graph where all # vertices of V represent single scalar operations G = build_graph_vertices(scalar_expressions, skip_terminal_modifiers=True) # Compute graph edges V_deps = [] for i, v in G.nodes.items(): expr = v["expression"] if expr._ufl_is_terminal_ or expr._ufl_is_terminal_modifier_: V_deps.append(()) else: V_deps.append([G.e2i[o] for o in expr.ufl_operands]) for i, edges in enumerate(V_deps): for j in edges: if i == j: continue G.add_edge(i, j) return G def rebuild_with_scalar_subexpressions(G): """Build a new expression2index mapping where each subexpression is scalar valued. Input: - G.e2i - G.V - G.V_symbols - G.total_unique_symbols Output: - NV - Array with reverse mapping from index to expression - nvs - Tuple of ne2i indices corresponding to the last vertex of G.V """ # Compute symbols over graph and rebuild scalar expression # # New expression which represents usually an algebraic operation # generates a new symbol value_numberer = ValueNumberer(G) # V_symbols maps an index of a node to a list of # symbols which are present in that node V_symbols = value_numberer.compute_symbols() total_unique_symbols = value_numberer.symbol_count # Array to store the scalar subexpression in for each symbol W = np.empty(total_unique_symbols, dtype=object) # Iterate over each graph node in order for i, v in G.nodes.items(): expr = v["expression"] # Find symbols of v components vs = V_symbols[i] # Skip if there's nothing new here (should be the case for indexing types) # New symbols are not given to indexing types, so W[symbol] already equals # an expression, since it was assigned to the symbol in a previous loop # cycle if all(W[s] is not None for s in vs): continue if is_modified_terminal(expr): sh = expr.ufl_shape if sh: # Store each terminal expression component. 
We may not # actually need all of these later, but that will be # optimized away. # Note: symmetries will be dealt with in the value numbering. ws = [expr[c] for c in ufl.permutation.compute_indices(sh)] else: # Store single modified terminal expression component if len(vs) != 1: raise RuntimeError( "Expecting single symbol for scalar valued modified terminal." ) ws = [expr] # FIXME: Replace ws[:] with 0's if its table is empty # Possible redesign: loop over modified terminals only first, # then build tables for them, set W[s] = 0.0 for modified terminals with zero table, # then loop over non-(modified terminal)s to reconstruct expression. else: # Find symbols of operands sops = [] for j, vop in enumerate(expr.ufl_operands): if isinstance(vop, ufl.classes.MultiIndex): # TODO: Store MultiIndex in G.V and allocate a symbol to it for this to work if not isinstance(expr, ufl.classes.IndexSum): raise RuntimeError(f"Not expecting a {type(expr)}.") sops.append(()) else: # TODO: Build edge datastructure and use instead? # k = G.E[i][j] k = G.e2i[vop] sops.append(V_symbols[k]) # Fetch reconstructed operand expressions wops = [tuple(W[k] for k in so) for so in sops] # Reconstruct scalar subexpressions of v ws = reconstruct(expr, wops) # Store all scalar subexpressions for v symbols if len(vs) != len(ws): raise RuntimeError("Expecting one symbol for each expression.") # Store each new scalar subexpression in W at the index of its symbol handled = set() for s, w in zip(vs, ws): if W[s] is None: W[s] = w handled.add(s) else: assert ( s in handled ) # Result of symmetry! - but I think this never gets reached anyway (CNR) # Find symbols of final v from input graph vs = V_symbols[-1] scalar_expressions = W[vs] return scalar_expressions def _count_nodes_with_unique_post_traversal(expressions, skip_terminal_modifiers=False): """Yield o for each node o in expr, child before parent. Never visits a node twice. """ def getops(e): """Get a modifiable list of operands of e. Optionally treating modified terminals as a unit. """ # TODO: Maybe use e._ufl_is_terminal_modifier_ if e._ufl_is_terminal_ or (skip_terminal_modifiers and is_modified_terminal(e)): return [] else: return list(e.ufl_operands) e2i = {} stack = [(expr, getops(expr)) for expr in reversed(expressions)] while stack: expr, ops = stack[-1] if expr in e2i: stack.pop() continue for i, o in enumerate(ops): if o is not None and o not in e2i: stack.append((o, getops(o))) ops[i] = None break else: if not isinstance(expr, (ufl.classes.MultiIndex, ufl.classes.Label)): count = len(e2i) e2i[expr] = count stack.pop() return e2i ffcx-0.9.0/ffcx/ir/analysis/indexing.py000066400000000000000000000112731470142666300200250ustar00rootroot00000000000000# Copyright (C) 2011-2017 Martin Sandve Alnæs # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Algorithms for working with multiindices.""" import ufl from ufl.classes import ComponentTensor, FixedIndex, Index, Indexed from ufl.permutation import compute_indices from ufl.utils.indexflattening import flatten_multiindex, shape_to_strides def map_indexed_arg_components(indexed): """Build a map from flattened components to subexpression. Builds integer list mapping between flattened components of indexed expression and its underlying tensor-valued subexpression. 
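
    Illustrative example (hand-worked): for a 2x2 tensor A, the expression
    A[1, i] with free index i has two scalar components, which map to the
    flattened components [2, 3] of A, i.e. its second row.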
""" assert isinstance(indexed, Indexed) # AKA indexed = tensor[multiindex] tensor, multiindex = indexed.ufl_operands # AKA e1 = e2[multiindex] # (this renaming is historical, but kept for consistency with all the variables *1,*2 below) e2 = tensor e1 = indexed # Get tensor and index shape sh1 = e1.ufl_shape sh2 = e2.ufl_shape fi1 = e1.ufl_free_indices fi2 = e2.ufl_free_indices fid1 = e1.ufl_index_dimensions fid2 = e2.ufl_index_dimensions # Compute regular and total shape tsh1 = sh1 + fid1 tsh2 = sh2 + fid2 # r1 = len(tsh1) r2 = len(tsh2) # str1 = shape_to_strides(tsh1) str2 = shape_to_strides(tsh2) assert not sh1 assert sh2 # Must have shape to be indexed in the first place assert ufl.product(tsh1) <= ufl.product(tsh2) # Build map from fi2/fid2 position (-offset nmui) to fi1/fid1 position ind2_to_ind1_map = [None] * len(fi2) for k, i in enumerate(fi2): ind2_to_ind1_map[k] = fi1.index(i) # Build map from fi1/fid1 position to mi position nmui = len(multiindex) multiindex_to_ind1_map = [None] * nmui for k, i in enumerate(multiindex): if isinstance(i, Index): multiindex_to_ind1_map[k] = fi1.index(i.count()) # Build map from flattened e1 component to flattened e2 component perm1 = compute_indices(tsh1) ni1 = ufl.product(tsh1) # Situation: e1 = e2[mi] d1 = [None] * ni1 p2 = [None] * r2 assert len(sh2) == nmui for k, i in enumerate(multiindex): if isinstance(i, FixedIndex): p2[k] = int(i) for c1, p1 in enumerate(perm1): for k, i in enumerate(multiindex): if isinstance(i, Index): p2[k] = p1[multiindex_to_ind1_map[k]] for k, i in enumerate(ind2_to_ind1_map): p2[nmui + k] = p1[i] c2 = flatten_multiindex(p2, str2) d1[c1] = c2 # Consistency checks assert all(isinstance(x, int) for x in d1) assert len(set(d1)) == len(d1) return d1 def map_component_tensor_arg_components(tensor): """Build a map from flattened components to subexpression. Builds integer list mapping between flattended components of tensor and its underlying indexed subexpression. """ assert isinstance(tensor, ComponentTensor) # AKA tensor = as_tensor(indexed, multiindex) indexed, multiindex = tensor.ufl_operands e1 = indexed e2 = tensor # e2 = as_tensor(e1, multiindex) mi = [i for i in multiindex if isinstance(i, Index)] # Get tensor and index shapes sh1 = e1.ufl_shape # (sh)ape of e1 sh2 = e2.ufl_shape # (sh)ape of e2 fi1 = e1.ufl_free_indices # (f)ree (i)ndices of e1 fi2 = e2.ufl_free_indices # ... fid1 = e1.ufl_index_dimensions # (f)ree (i)ndex (d)imensions of e1 fid2 = e2.ufl_index_dimensions # ... # Compute total shape (tsh) of e1 and e2 tsh1 = sh1 + fid1 tsh2 = sh2 + fid2 r1 = len(tsh1) # 'total rank' or e1 r2 = len(tsh2) # ... 
str1 = shape_to_strides(tsh1) assert not sh1 assert sh2 assert len(mi) == len(multiindex) assert ufl.product(tsh1) == ufl.product(tsh2) assert fi1 assert all(i in fi1 for i in fi2) nmui = len(multiindex) assert nmui == len(sh2) # Build map from fi2/fid2 position (-offset nmui) to fi1/fid1 position p2_to_p1_map = [None] * r2 for k, i in enumerate(fi2): p2_to_p1_map[k + nmui] = fi1.index(i) # Build map from fi1/fid1 position to mi position for k, i in enumerate(mi): p2_to_p1_map[k] = fi1.index(mi[k].count()) # Build map from flattened e1 component to flattened e2 component perm2 = compute_indices(tsh2) ni2 = ufl.product(tsh2) # Situation: e2 = as_tensor(e1, mi) d2 = [None] * ni2 p1 = [None] * r1 for c2, p2 in enumerate(perm2): for k2, k1 in enumerate(p2_to_p1_map): p1[k1] = p2[k2] c1 = flatten_multiindex(p1, str1) d2[c2] = c1 # Consistency checks assert all(isinstance(x, int) for x in d2) assert len(set(d2)) == len(d2) return d2 ffcx-0.9.0/ffcx/ir/analysis/modified_terminals.py000066400000000000000000000241251470142666300220560ustar00rootroot00000000000000# Copyright (C) 2011-2017 Martin Sandve Alnæs # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Modified terminals.""" import logging import typing from ufl.classes import ( Argument, CellAvg, FacetAvg, FixedIndex, FormArgument, Grad, Indexed, Jacobian, ReferenceGrad, ReferenceValue, Restricted, SpatialCoordinate, ) from ufl.permutation import build_component_numbering logger = logging.getLogger("ffcx") class ModifiedTerminal: """A modified terminal.""" def __init__( self, expr, terminal, reference_value: bool, base_shape, base_symmetry, component: tuple[int, ...], flat_component: int, global_derivatives: tuple[int, ...], local_derivatives: tuple[int, ...], averaged: typing.Union[None, str], restriction: typing.Union[None, str], ): """Initialise. Args: expr: The original UFL expression terminal: the underlying Terminal object reference_value: whether this is represented in reference frame base_shape: base shape base_symmetry: base symmetry component: the global component of the Terminal flat_component: flattened local component of the Terminal, considering symmetry global_derivatives: each entry is a derivative in that global direction local_derivatives: each entry is a derivative in that local direction averaged: Entity to average over (None, 'facet' or 'cell') restriction: The restriction (None, '+' or '-') """ # The original expression self.expr = expr # The underlying terminal expression self.terminal = terminal # Are we seeing the terminal in physical or reference frame self.reference_value = reference_value # Get the shape of the core terminal or its reference value, # this is the shape that component and flat_component refers to self.base_shape = base_shape self.base_symmetry = base_symmetry # Components self.component = component self.flat_component = flat_component # Derivatives self.global_derivatives = global_derivatives self.local_derivatives = local_derivatives # Evaluation method (alternatives: { None, 'facet_midpoint', # 'cell_midpoint', 'facet_avg', 'cell_avg' }) self.averaged = averaged # Restriction to one cell or the other for interior facet integrals self.restriction = restriction def as_tuple(self): """Return a tuple with hashable values that uniquely identifies this modified terminal. Some of the derived variables can be omitted here as long as they are fully determined from the variables that are included here. 
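
        The tuple backs __hash__ and __eq__, so two modified terminals
        compare equal exactly when all of these fields coincide.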
""" t = self.terminal # FIXME: Terminal is not sortable... rv = self.reference_value # bs = self.base_shape # bsy = self.base_symmetry # c = self.component fc = self.flat_component gd = self.global_derivatives ld = self.local_derivatives a = self.averaged r = self.restriction return (t, rv, fc, gd, ld, a, r) def argument_ordering_key(self): """Return a key for deterministic sorting of argument vertex indices. The key is based on the properties of the modified terminal. Used in factorization but moved here for closeness with ModifiedTerminal attributes. """ t = self.terminal assert isinstance(t, Argument) n = t.number() assert n >= 0 p = t.part() rv = self.reference_value # bs = self.base_shape # bsy = self.base_symmetry # c = self.component fc = self.flat_component gd = self.global_derivatives ld = self.local_derivatives a = self.averaged r = self.restriction return (n, p, rv, fc, gd, ld, a, r) def __hash__(self): """Hash.""" return hash(self.as_tuple()) def __eq__(self, other): """Check equality.""" return isinstance(other, ModifiedTerminal) and self.as_tuple() == other.as_tuple() def __str__(self): """Format as string.""" return ( f"terminal: {self.terminal}\n" f"global_derivatives: {self.global_derivatives}\n" f"local_derivatives: {self.local_derivatives}\n" f"averaged: {self.averaged}\n" f"component: {self.component}\n" f"restriction: {self.restriction}" ) def is_modified_terminal(v): """Check if v is a terminal or a terminal wrapped in terminal modifier types.""" while not v._ufl_is_terminal_: if v._ufl_is_terminal_modifier_: v = v.ufl_operands[0] else: return False return True def strip_modified_terminal(v): """Extract core Terminal from a modified terminal or return None.""" while not v._ufl_is_terminal_: if v._ufl_is_terminal_modifier_: v = v.ufl_operands[0] else: return None return v def analyse_modified_terminal(expr): """Analyse a so-called 'modified terminal' expression. Return its properties in more compact form as a ModifiedTerminal object. A modified terminal expression is an object of a Terminal subtype, wrapped in terminal modifier types. The wrapper types can include 0-* Grad or ReferenceGrad objects, and 0-1 ReferenceValue, 0-1 Restricted, 0-1 Indexed, and 0-1 FacetAvg or CellAvg objects. 
""" # Data to determine component = None global_derivatives = [] local_derivatives = [] reference_value = None restriction = None averaged = None # Start with expr and strip away layers of modifiers t = expr while not t._ufl_is_terminal_: if isinstance(t, Indexed): if component is not None: raise RuntimeError("Got twice indexed terminal.") t, i = t.ufl_operands component = [int(j) for j in i] if not all(isinstance(j, FixedIndex) for j in i): raise RuntimeError("Expected only fixed indices.") elif isinstance(t, ReferenceValue): if reference_value is not None: raise RuntimeError("Got twice pulled back terminal!") (t,) = t.ufl_operands reference_value = True elif isinstance(t, ReferenceGrad): if not component: # covers None or () raise RuntimeError("Got local gradient of terminal without prior indexing.") (t,) = t.ufl_operands local_derivatives.append(component[-1]) component = component[:-1] elif isinstance(t, Grad): if not component: # covers None or () raise RuntimeError("Got local gradient of terminal without prior indexing.") (t,) = t.ufl_operands global_derivatives.append(component[-1]) component = component[:-1] elif isinstance(t, Restricted): if restriction is not None: raise RuntimeError("Got twice restricted terminal!") restriction = t._side (t,) = t.ufl_operands elif isinstance(t, CellAvg): if averaged is not None: raise RuntimeError("Got twice averaged terminal!") (t,) = t.ufl_operands averaged = "cell" elif isinstance(t, FacetAvg): if averaged is not None: raise RuntimeError("Got twice averaged terminal!") (t,) = t.ufl_operands averaged = "facet" elif t._ufl_terminal_modifiers_: raise RuntimeError( f"Missing handler for terminal modifier type {type(t)}, object is {t!r}." ) else: raise RuntimeError(f"Unexpected type {type(t)} object {t}.") # Make canonical representation of derivatives global_derivatives = tuple(sorted(global_derivatives)) local_derivatives = tuple(sorted(local_derivatives)) # Make reference_value true or false reference_value = reference_value or False # Consistency check if isinstance(t, (SpatialCoordinate, Jacobian)): pass else: if local_derivatives and not reference_value: raise RuntimeError("Local derivatives of non-local value is not legal.") if global_derivatives and reference_value: raise RuntimeError("Global derivatives of local value is not legal.") # Make sure component is an integer tuple if component is None: component = () else: component = tuple(component) # Get the shape of the core terminal or its reference value, this is # the shape that component refers to if isinstance(t, FormArgument): element = t.ufl_function_space().ufl_element() if reference_value: # Ignoring symmetry, assuming already applied in conversion # to reference frame base_symmetry = {} base_shape = element.reference_value_shape else: base_symmetry = element.symmetry() base_shape = t.ufl_shape else: base_symmetry = {} base_shape = t.ufl_shape # Assert that component is within the shape of the (reference) # terminal if len(component) != len(base_shape): raise RuntimeError("Length of component does not match rank of (reference) terminal.") if not all(c >= 0 and c < d for c, d in zip(component, base_shape)): raise RuntimeError("Component indices {component} are outside value shape {base_shape}.") # Flatten component vi2si, _ = build_component_numbering(base_shape, base_symmetry) flat_component = vi2si[component] return ModifiedTerminal( expr, t, reference_value, base_shape, base_symmetry, component, flat_component, global_derivatives, local_derivatives, averaged, restriction, ) 
ffcx-0.9.0/ffcx/ir/analysis/reconstruct.py000066400000000000000000000142111470142666300205660ustar00rootroot00000000000000# Copyright (C) 2011-2017 Martin Sandve Alnæs and Chris Richardson # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Reconstruct.""" import ufl def handle_scalar_nary(o, ops): """Handle a scalary nary operator.""" if o.ufl_shape != (): raise RuntimeError("Expecting scalar.") sops = [op[0] for op in ops] return [o._ufl_expr_reconstruct_(*sops)] def handle_condition(o, ops): """Handle a condition.""" # A condition is always scalar, so len(op) == 1 sops = [op[0] for op in ops] return [o._ufl_expr_reconstruct_(*sops)] def handle_conditional(o, ops): """Handle a conditional.""" # A condition can be non scalar symbols = [] n = len(ops[1]) if len(ops[0]) != 1: raise RuntimeError("Condition should be scalar.") if n != len(ops[2]): raise RuntimeError("Conditional branches should have same shape.") for i in range(len(ops[1])): sops = (ops[0][0], ops[1][i], ops[2][i]) symbols.append(o._ufl_expr_reconstruct_(*sops)) return symbols def handle_elementwise_unary(o, ops): """Handle a elementwise unary operator.""" if len(ops) > 1: raise RuntimeError("Expecting unary operator.") return [o._ufl_expr_reconstruct_(op) for op in ops[0]] def handle_division(o, ops): """Handle a division.""" if len(ops) != 2: raise RuntimeError("Expecting two operands.") if len(ops[1]) != 1: raise RuntimeError("Expecting scalar divisor.") (b,) = ops[1] return [o._ufl_expr_reconstruct_(a, b) for a in ops[0]] def handle_sum(o, ops): """Handle a sum.""" if len(ops) != 2: raise RuntimeError("Expecting two operands.") if len(ops[0]) != len(ops[1]): raise RuntimeError("Expecting scalar divisor.") return [o._ufl_expr_reconstruct_(a, b) for a, b in zip(ops[0], ops[1])] def handle_product(o, ops): """Handle a product.""" if len(ops) != 2: raise RuntimeError("Expecting two operands.") # Get the simple cases out of the way if len(ops[0]) == 1: # True scalar * something (a,) = ops[0] return [ufl.classes.Product(a, b) for b in ops[1]] elif len(ops[1]) == 1: # Something * true scalar (b,) = ops[1] return [ufl.classes.Product(a, b) for a in ops[0]] # Neither of operands are true scalars, this is the tricky part o0, o1 = o.ufl_operands # Get shapes and index shapes fi = o.ufl_free_indices fi0 = o0.ufl_free_indices fi1 = o1.ufl_free_indices fid = o.ufl_index_dimensions fid0 = o0.ufl_index_dimensions fid1 = o1.ufl_index_dimensions # Need to map each return component to one component of o0 and # one component of o1 indices = ufl.permutation.compute_indices(fid) # Compute which component of o0 is used in component (comp,ind) of o # Compute strides within free index spaces ist0 = ufl.utils.indexflattening.shape_to_strides(fid0) ist1 = ufl.utils.indexflattening.shape_to_strides(fid1) # Map o0 and o1 indices to o indices indmap0 = [fi.index(i) for i in fi0] indmap1 = [fi.index(i) for i in fi1] indks = [ ( ufl.utils.indexflattening.flatten_multiindex([ind[i] for i in indmap0], ist0), ufl.utils.indexflattening.flatten_multiindex([ind[i] for i in indmap1], ist1), ) for ind in indices ] # Build products for scalar components results = [ufl.classes.Product(ops[0][k0], ops[1][k1]) for k0, k1 in indks] return results def handle_index_sum(o, ops): """Handle an index sum.""" summand, mi = o.ufl_operands ic = mi[0].count() fi = summand.ufl_free_indices fid = summand.ufl_index_dimensions ipos = fi.index(ic) d = fid[ipos] # Compute "macro-dimensions" before and after i in the total 
shape of a predim = ufl.product(summand.ufl_shape) * ufl.product(fid[:ipos]) postdim = ufl.product(fid[ipos + 1 :]) # Map each flattened total component of summand to # flattened total component of indexsum o by removing # axis corresponding to summation index ii. ss = ops[0] # Scalar subexpressions of summand if len(ss) != predim * postdim * d: raise RuntimeError("Mismatching number of subexpressions.") sops = [] for i in range(predim): iind = i * (postdim * d) for k in range(postdim): ind = iind + k sops.append([ss[ind + j * postdim] for j in range(d)]) # For each scalar output component, sum over collected subcomponents # TODO: Need to split this into binary additions to work with future CRSArray format, # i.e. emitting more expressions than there are symbols for this node. results = [sum(sop) for sop in sops] return results # TODO: To implement compound tensor operators such as dot and inner, # we need to identify which index to do the contractions over, # and build expressions such as sum(a*b for a,b in zip(aops, bops)) _reconstruct_call_lookup = { ufl.classes.MathFunction: handle_scalar_nary, ufl.classes.Abs: handle_scalar_nary, ufl.classes.MinValue: handle_scalar_nary, ufl.classes.MaxValue: handle_scalar_nary, ufl.classes.Real: handle_elementwise_unary, ufl.classes.Imag: handle_elementwise_unary, ufl.classes.Power: handle_scalar_nary, ufl.classes.BesselFunction: handle_scalar_nary, ufl.classes.Atan2: handle_scalar_nary, ufl.classes.Product: handle_product, ufl.classes.Division: handle_division, ufl.classes.Sum: handle_sum, ufl.classes.IndexSum: handle_index_sum, ufl.classes.Conj: handle_elementwise_unary, ufl.classes.Conditional: handle_conditional, ufl.classes.Condition: handle_condition, } def reconstruct(o, *args): """Reconstruct.""" # First look for exact match f = _reconstruct_call_lookup.get(type(o), False) if f: return f(o, *args) else: # Look for parent class types instead for k in _reconstruct_call_lookup.keys(): if isinstance(o, k): return _reconstruct_call_lookup[k](o, *args) # Nothing found raise RuntimeError(f"Not expecting expression of type {type(o)} in here.") ffcx-0.9.0/ffcx/ir/analysis/valuenumbering.py000066400000000000000000000212231470142666300212370ustar00rootroot00000000000000# Copyright (C) 2011-2017 Martin Sandve Alnæs # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Algorithms for value numbering within computational graphs.""" import logging import ufl from ufl.pullback import SymmetricPullback from ffcx.ir.analysis.indexing import ( map_component_tensor_arg_components, map_indexed_arg_components, ) from ffcx.ir.analysis.modified_terminals import analyse_modified_terminal logger = logging.getLogger("ffcx") class ValueNumberer: """Maps scalar components to unique values. An algorithm to map the scalar components of an expression node to unique value numbers, with fallthrough for types that can be mapped to the value numbers of their operands. 
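
    For example, a coefficient with a symmetric 2x2 value shape receives
    only three distinct value numbers, the two off-diagonal components
    sharing one.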
""" def __init__(self, G): """Initialise.""" self.symbol_count = 0 self.G = G self.V_symbols = [] self.call_lookup = { ufl.classes.Expr: self.expr, ufl.classes.Argument: self.form_argument, ufl.classes.Coefficient: self.form_argument, ufl.classes.Grad: self._modified_terminal, ufl.classes.ReferenceGrad: self._modified_terminal, ufl.classes.FacetAvg: self._modified_terminal, ufl.classes.CellAvg: self._modified_terminal, ufl.classes.Restricted: self._modified_terminal, ufl.classes.ReferenceValue: self._modified_terminal, ufl.classes.Indexed: self.indexed, ufl.classes.ComponentTensor: self.component_tensor, ufl.classes.ListTensor: self.list_tensor, ufl.classes.Variable: self.variable, } def new_symbols(self, n): """Generate new symbols with a running counter.""" begin = self.symbol_count end = begin + n self.symbol_count = end return list(range(begin, end)) def new_symbol(self): """Generate new symbol with a running counter.""" begin = self.symbol_count self.symbol_count += 1 return begin def get_node_symbols(self, expr): """Get node symbols.""" idx = [i for i, v in self.G.nodes.items() if v["expression"] == expr][0] return self.V_symbols[idx] def compute_symbols(self): """Compute symbols.""" for i, v in self.G.nodes.items(): expr = v["expression"] symbol = None # First look for exact type match f = self.call_lookup.get(type(expr), False) if f: symbol = f(expr) else: # Look for parent class types instead for k in self.call_lookup.keys(): if isinstance(expr, k): symbol = self.call_lookup[k](expr) break if symbol is None: # Nothing found raise RuntimeError(f"Not expecting type {type(expr)} here.") self.V_symbols.append(symbol) return self.V_symbols def expr(self, v): """Create new symbols for expressions that represent new values.""" n = ufl.product(v.ufl_shape + v.ufl_index_dimensions) return self.new_symbols(n) def form_argument(self, v): """Create new symbols for expressions that represent new values.""" e = v.ufl_function_space().ufl_element() if isinstance(e.pullback, SymmetricPullback): # Build symbols with symmetric components skipped symbols = [] mapped_symbols = {} for c in ufl.permutation.compute_indices(v.ufl_shape): # Build mapped component mc with symmetries from element considered mc = min(i for i, j in e.pullback._symmetry.items() if j == e.pullback._symmetry[c]) # Get existing symbol or create new and store with mapped component mc as key s = mapped_symbols.get(mc) if s is None: s = self.new_symbol() mapped_symbols[mc] = s symbols.append(s) else: n = ufl.product(v.ufl_shape + v.ufl_index_dimensions) symbols = self.new_symbols(n) return symbols # Handle modified terminals with element symmetries and second derivative symmetries! # terminals are implemented separately, or maybe they don't need to be? def _modified_terminal(self, v): """Handle modified terminal. Modifiers: terminal: the underlying Terminal object global_derivatives: tuple of ints, each meaning derivative in that global direction local_derivatives: tuple of ints, each meaning derivative in that local direction reference_value: bool, whether this is represented in reference frame averaged: None, 'facet' or 'cell' restriction: None, '+' or '-' component: tuple of ints, the global component of the Terminal flat_component: single int, flattened local component of the Terminal, considering symmetry """ # (1) mt.terminal.ufl_shape defines a core indexing space UNLESS mt.reference_value, # in which case the reference value shape of the element must be used. 
# (2) mt.terminal.ufl_element().symmetry() defines core symmetries # (3) averaging and restrictions define distinct symbols, no additional symmetries # (4) two or more grad/reference_grad define distinct symbols with additional symmetries # v is not necessarily scalar here, indexing in (0,...,0) picks the first scalar component # to analyse, which should be sufficient to get the base shape and derivatives if v.ufl_shape: mt = analyse_modified_terminal(v[(0,) * len(v.ufl_shape)]) else: mt = analyse_modified_terminal(v) # Get derivatives num_ld = len(mt.local_derivatives) num_gd = len(mt.global_derivatives) assert not (num_ld and num_gd) if num_ld: domain = ufl.domain.extract_unique_domain(mt.terminal) tdim = domain.topological_dimension() d_components = ufl.permutation.compute_indices((tdim,) * num_ld) elif num_gd: domain = ufl.domain.extract_unique_domain(mt.terminal) gdim = domain.geometric_dimension() d_components = ufl.permutation.compute_indices((gdim,) * num_gd) else: d_components = [()] # Get base shape without the derivative axes base_components = ufl.permutation.compute_indices(mt.base_shape) # Build symbols with symmetric components and derivatives skipped symbols = [] mapped_symbols = {} for bc in base_components: for dc in d_components: # Build mapped component mc with symmetries from element # and derivatives combined mbc = mt.base_symmetry.get(bc, bc) mdc = tuple(sorted(dc)) mc = mbc + mdc # Get existing symbol or create new and store with # mapped component mc as key s = mapped_symbols.get(mc) if s is None: s = self.new_symbol() mapped_symbols[mc] = s symbols.append(s) # Consistency check before returning symbols assert not v.ufl_free_indices if ufl.product(v.ufl_shape) != len(symbols): raise RuntimeError("Internal error in value numbering.") return symbols def indexed(self, Aii): """Return indexed value. This is implemented as a fall-through operation.
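For example (illustrative values): if A has ufl_shape (2, 2) and its
flattened components were numbered row-major, then A[1, 0] maps to flat
component 2 and simply reuses that symbol:

    A_symbols = [4, 5, 6, 7]             # symbols of a 2x2 tensor A, row-major
    d = [2]                              # component map computed for A[1, 0]
    symbols = [A_symbols[k] for k in d]  # -> [6]; no new symbol is created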
""" # Reuse symbols of arg A for Aii A = Aii.ufl_operands[0] # Get symbols of argument A A_symbols = self.get_node_symbols(A) # Map A_symbols to Aii_symbols d = map_indexed_arg_components(Aii) symbols = [A_symbols[k] for k in d] return symbols def component_tensor(self, A): """Component tensor.""" # Reuse symbols of arg Aii for A Aii = A.ufl_operands[0] # Get symbols of argument Aii Aii_symbols = self.get_node_symbols(Aii) # Map A_symbols to Aii_symbols d = map_component_tensor_arg_components(A) symbols = [Aii_symbols[k] for k in d] return symbols def list_tensor(self, v): """List tensor.""" symbols = [] for row in v.ufl_operands: symbols.extend(self.get_node_symbols(row)) return symbols def variable(self, v): """Direct reuse of all symbols.""" return self.get_node_symbols(v.ufl_operands[0]) ffcx-0.9.0/ffcx/ir/analysis/visualise.py000066400000000000000000000042001470142666300202140ustar00rootroot00000000000000# Copyright (C) 2018 Chris Richardson # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Utility to draw graphs.""" from ufl.classes import ( Argument, Division, FloatValue, Indexed, IntValue, Product, ReferenceValue, Sum, ) from ffcx.ir.analysis.modified_terminals import strip_modified_terminal def visualise_graph(Gx, filename): """Visualise a graph.""" try: import pygraphviz as pgv except ImportError: raise RuntimeError("Install pygraphviz") if Gx.number_of_nodes() > 400: print("Skipping visualisation") return G = pgv.AGraph(strict=False, directed=True) for nd, v in Gx.nodes.items(): ex = v["expression"] label = ex.__class__.__name__ if isinstance(ex, Sum): label = "+" elif isinstance(ex, Product): label = "*" elif isinstance(ex, Division): label = "/" elif isinstance(ex, (IntValue, FloatValue)): label = ex.value() elif isinstance(ex, (Indexed, ReferenceValue)): label = str(ex) G.add_node(nd, label="[%d] %s" % (nd, label)) arg = strip_modified_terminal(ex) if isinstance(arg, Argument): G.get_node(nd).attr["shape"] = "box" stat = v.get("status") if stat == "piecewise": G.get_node(nd).attr["color"] = "blue" G.get_node(nd).attr["penwidth"] = 5 elif stat == "varying": G.get_node(nd).attr["color"] = "red" G.get_node(nd).attr["penwidth"] = 5 elif stat == "inactive": G.get_node(nd).attr["color"] = "dimgray" G.get_node(nd).attr["penwidth"] = 5 t = v.get("target") if t: G.get_node(nd).attr["label"] += ":" + str(t) G.get_node(nd).attr["shape"] = "hexagon" c = v.get("component") if c: G.get_node(nd).attr["label"] += f", comp={c}" for nd, eds in Gx.out_edges.items(): for ed in eds: G.add_edge(nd, ed) G.layout(prog="dot") G.draw(filename) ffcx-0.9.0/ffcx/ir/elementtables.py000066400000000000000000000540631470142666300172250ustar00rootroot00000000000000# Copyright (C) 2013-2017 Martin Sandve Alnæs # # This file is part of FFCx. 
(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Tools for precomputed tables of terminal values.""" import logging import typing import basix.ufl import numpy as np import numpy.typing as npt import ufl from ffcx.element_interface import basix_index from ffcx.ir.representationutils import ( create_quadrature_points_and_weights, integral_type_to_entity_dim, map_integral_points, ) logger = logging.getLogger("ffcx") # Default tolerances for comparing table values (slightly tighter than the np.allclose defaults of rtol=1e-5, atol=1e-8) default_rtol = 1e-6 default_atol = 1e-9 piecewise_ttypes = ("piecewise", "fixed", "ones", "zeros") uniform_ttypes = ("fixed", "ones", "zeros", "uniform") class ModifiedTerminalElement(typing.NamedTuple): """Modified terminal element.""" element: basix.ufl._ElementBase averaged: str local_derivatives: tuple[int, ...] fc: int class UniqueTableReferenceT(typing.NamedTuple): """Unique table reference.""" name: str values: npt.NDArray[np.float64] offset: int block_size: int ttype: str is_piecewise: bool is_uniform: bool is_permuted: bool has_tensor_factorisation: bool tensor_factors: list[typing.Any] tensor_permutation: np.typing.NDArray[np.int32] def equal_tables(a, b, rtol=default_rtol, atol=default_atol): """Check if two tables are equal.""" a = np.asarray(a) b = np.asarray(b) if a.shape != b.shape: return False else: return np.allclose(a, b, rtol=rtol, atol=atol) def clamp_table_small_numbers( table, rtol=default_rtol, atol=default_atol, numbers=(-1.0, 0.0, 1.0) ): """Clamp values that are almost -1, 0 or 1 to those exact numbers. Returns the table.""" table = np.asarray(table) for n in numbers: table[np.where(np.isclose(table, n, rtol=rtol, atol=atol))] = n return table def get_ffcx_table_values( points, cell, integral_type, element, avg, entity_type, derivative_counts, flat_component, codim, ): """Extract values from FFCx element table.
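A shape-only sketch of a typical call (the element, cell and points below
are illustrative assumptions, not requirements of this function):

    import numpy as np
    import basix.ufl
    import ufl

    cell = ufl.triangle
    element = basix.ufl.element("Lagrange", "triangle", 1)
    points = np.array([[0.25, 0.25], [0.5, 0.25]])
    t = get_ffcx_table_values(
        points, cell, "cell", element, None, "cell", (0, 0), 0, 0
    )
    t["array"].shape  # (1, 1, 2, 3): permutations, entities, points, dofs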
Returns a dict holding the tabulated values in a 4D numpy array with axes (permutation, entity number, quadrature point number, dof number), together with the scalar component's offset and stride. """ deriv_order = sum(derivative_counts) if integral_type in ufl.custom_integral_types: # Use quadrature points on cell for analysis in custom integral types integral_type = "cell" assert not avg if integral_type == "expression": # FFCx tables for expression are generated as either interior cell points # or points on a facet if entity_type == "cell": integral_type = "cell" else: integral_type = "exterior_facet" if avg in ("cell", "facet"): # Redefine points to compute average tables # Make sure this is not called with points, that doesn't make sense # assert points is None # Not expecting derivatives of averages assert not any(derivative_counts) assert deriv_order == 0 # Doesn't matter if it's exterior or interior facet integral, # just need a valid integral type to create quadrature rule if avg == "cell": integral_type = "cell" elif avg == "facet": integral_type = "exterior_facet" if isinstance(element, basix.ufl.QuadratureElement): points = element._points weights = element._weights else: # Make quadrature rule and get points and weights points, weights = create_quadrature_points_and_weights( integral_type, cell, element.embedded_superdegree, "default", [element] ) # Tabulate table of basis functions and derivatives in points for each entity tdim = cell.topological_dimension() entity_dim = integral_type_to_entity_dim(integral_type, tdim) num_entities = cell.num_sub_entities(entity_dim) # Extract arrays for the right scalar component component_tables = [] component_element, offset, stride = element.get_component_element(flat_component) for entity in range(num_entities): if codim == 0: entity_points = map_integral_points(points, integral_type, cell, entity) elif codim == 1: entity_points = points else: raise RuntimeError("Codimension > 1 isn't supported.") tbl = component_element.tabulate(deriv_order, entity_points) tbl = tbl[basix_index(derivative_counts)] component_tables.append(tbl) if avg in ("cell", "facet"): # Compute the numeric integral of each component table wsum = sum(weights) for entity, tbl in enumerate(component_tables): num_dofs = tbl.shape[1] tbl = np.dot(tbl.T, weights) / wsum tbl = np.reshape(tbl, (1, num_dofs)) component_tables[entity] = tbl # Loop over entities and fill table blockwise (each block = points x dofs) assert len(component_tables) == num_entities num_points, num_dofs = component_tables[0].shape shape = (1, num_entities, num_points, num_dofs) res = np.zeros(shape) for entity in range(num_entities): res[:, entity, :, :] = component_tables[entity] return {"array": res, "offset": offset, "stride": stride} def generate_psi_table_name( quadrature_rule, element_counter, averaged: str, entity_type, derivative_counts, flat_component ): """Generate a name for the psi table. Format: FE#_C#_D###[_AC|_AF|][_F|V][_Q#], where '#' will be an integer value and: - FE is a simple counter to distinguish the various bases, it will be assigned in an arbitrary fashion. - C is the component number if any (this does not yet take into account tensor valued functions) - D is the number of derivatives in each spatial direction if any. If the element is defined in 3D, then D012 means d^3(*)/dydz^2.
- AC marks that the element values are averaged over the cell - AF marks that the element values are averaged over the facet - F marks that the first array dimension enumerates facets on the cell - V marks that the first array dimension enumerates vertices on the cell - Q unique ID of quadrature rule, to distinguish between tables in a mixed quadrature rule setting """ name = "FE%d" % element_counter if flat_component is not None: name += "_C%d" % flat_component if any(derivative_counts): name += "_D" + "".join(str(d) for d in derivative_counts) name += {None: "", "cell": "_AC", "facet": "_AF"}[averaged] name += {"cell": "", "facet": "_F", "vertex": "_V"}[entity_type] name += f"_Q{quadrature_rule.id()}" return name def get_modified_terminal_element(mt) -> typing.Optional[ModifiedTerminalElement]: """Get modified terminal element.""" gd = mt.global_derivatives ld = mt.local_derivatives domain = ufl.domain.extract_unique_domain(mt.terminal) # Extract element from FormArguments and relevant GeometricQuantities if isinstance(mt.terminal, ufl.classes.FormArgument): if gd and mt.reference_value: raise RuntimeError("Global derivatives of reference values not defined.") elif ld and not mt.reference_value: raise RuntimeError("Local derivatives of global values not defined.") element = mt.terminal.ufl_function_space().ufl_element() fc = mt.flat_component elif isinstance(mt.terminal, ufl.classes.SpatialCoordinate): if mt.reference_value: raise RuntimeError("Not expecting reference value of x.") if gd: raise RuntimeError("Not expecting global derivatives of x.") element = domain.ufl_coordinate_element() if not ld: fc = mt.flat_component else: # Actually the Jacobian expressed as reference_grad(x) fc = mt.flat_component # x-component assert len(mt.component) == 1 assert mt.component[0] == mt.flat_component elif isinstance(mt.terminal, ufl.classes.Jacobian): if mt.reference_value: raise RuntimeError("Not expecting reference value of J.") if gd: raise RuntimeError("Not expecting global derivatives of J.") element = domain.ufl_coordinate_element() assert len(mt.component) == 2 # Translate component J[i,d] to x element context rgrad(x[i])[d] fc, d = mt.component # x-component, derivative ld = tuple(sorted((d,) + ld)) else: return None assert (mt.averaged is None) or not (ld or gd) # Change derivatives format for table lookup tdim = domain.topological_dimension() local_derivatives: tuple[int, ...] 
= tuple(ld.count(i) for i in range(tdim)) return ModifiedTerminalElement(element, mt.averaged, local_derivatives, fc) def permute_quadrature_interval(points, reflections=0): """Permute quadrature points for an interval.""" output = points.copy() for p in output: assert len(p) < 2 or np.isclose(p[1], 0) assert len(p) < 3 or np.isclose(p[2], 0) for _ in range(reflections): for n, p in enumerate(output): output[n] = [1 - p[0]] return output def permute_quadrature_triangle(points, reflections=0, rotations=0): """Permute quadrature points for a triangle.""" output = points.copy() for p in output: assert len(p) < 3 or np.isclose(p[2], 0) for _ in range(rotations): for n, p in enumerate(output): output[n] = [p[1], 1 - p[0] - p[1]] for _ in range(reflections): for n, p in enumerate(output): output[n] = [p[1], p[0]] return output def permute_quadrature_quadrilateral(points, reflections=0, rotations=0): """Permute quadrature points for a quadrilateral.""" output = points.copy() for p in output: assert len(p) < 3 or np.isclose(p[2], 0) for _ in range(rotations): for n, p in enumerate(output): output[n] = [p[1], 1 - p[0]] for _ in range(reflections): for n, p in enumerate(output): output[n] = [p[1], p[0]] return output def build_optimized_tables( quadrature_rule, cell, integral_type, entity_type, modified_terminals, existing_tables, use_sum_factorization, is_mixed_dim, rtol=default_rtol, atol=default_atol, ): """Build the element tables needed for a list of modified terminals. Input: entity_type - str modified_terminals - ordered sequence of unique modified terminals FIXME: Document Output: mt_tables - dict(ModifiedTerminal: table data) """ # Add to element tables analysis = {} for mt in modified_terminals: res = get_modified_terminal_element(mt) if res: analysis[mt] = res # Build element numbering using topological ordering so subelements # get priority all_elements = [res[0] for res in analysis.values()] unique_elements = ufl.algorithms.sort_elements( set(ufl.algorithms.analysis.extract_sub_elements(all_elements)) ) element_numbers = {element: i for i, element in enumerate(unique_elements)} mt_tables = {} _existing_tables = existing_tables.copy() all_tensor_factors = [] tensor_n = 0 for mt in modified_terminals: res = analysis.get(mt) if not res: continue element, avg, local_derivatives, flat_component = res # Generate table and store table name with modified terminal # Build name for this particular table element_number = element_numbers[element] name = generate_psi_table_name( quadrature_rule, element_number, avg, entity_type, local_derivatives, flat_component ) # FIXME - currently just recalculate the tables every time, # only reusing them if they match numerically. # It should be possible to reuse the cached tables by name, but # the dofmap offset may differ due to restriction. tdim = cell.topological_dimension() codim = tdim - element.cell.topological_dimension() assert codim >= 0 if codim > 1: raise RuntimeError("Codimension > 1 isn't supported.") # Only permute quadrature rules for interior facets integrals and for # the codim zero element in mixed-dimensional integrals. 
The latter is # needed because a cell may see its sub-entities as being oriented # differently to their global orientation if integral_type == "interior_facet" or (is_mixed_dim and codim == 0): if tdim == 1 or codim == 1: # Do not add permutations if codim-1 as facets have already gotten a global # orientation in DOLFINx t = get_ffcx_table_values( quadrature_rule.points, cell, integral_type, element, avg, entity_type, local_derivatives, flat_component, codim, ) elif tdim == 2: new_table = [] for ref in range(2): new_table.append( get_ffcx_table_values( permute_quadrature_interval(quadrature_rule.points, ref), cell, integral_type, element, avg, entity_type, local_derivatives, flat_component, codim, ) ) t = new_table[0] t["array"] = np.vstack([td["array"] for td in new_table]) elif tdim == 3: cell_type = cell.cellname() if cell_type == "tetrahedron": new_table = [] for rot in range(3): for ref in range(2): new_table.append( get_ffcx_table_values( permute_quadrature_triangle(quadrature_rule.points, ref, rot), cell, integral_type, element, avg, entity_type, local_derivatives, flat_component, codim, ) ) t = new_table[0] t["array"] = np.vstack([td["array"] for td in new_table]) elif cell_type == "hexahedron": new_table = [] for rot in range(4): for ref in range(2): new_table.append( get_ffcx_table_values( permute_quadrature_quadrilateral( quadrature_rule.points, ref, rot ), cell, integral_type, element, avg, entity_type, local_derivatives, flat_component, codim, ) ) t = new_table[0] t["array"] = np.vstack([td["array"] for td in new_table]) else: t = get_ffcx_table_values( quadrature_rule.points, cell, integral_type, element, avg, entity_type, local_derivatives, flat_component, codim, ) # Clean up table tbl = clamp_table_small_numbers(t["array"], rtol=rtol, atol=atol) tabletype = analyse_table_type(tbl) if tabletype in piecewise_ttypes: # Reduce table to dimension 1 along num_points axis in generated code tbl = tbl[:, :, :1, :] if tabletype in uniform_ttypes: # Reduce table to dimension 1 along num_entities axis in generated code tbl = tbl[:, :1, :, :] is_permuted = is_permuted_table(tbl) if not is_permuted: # Reduce table along num_perms axis tbl = tbl[:1, :, :, :] # Check for existing identical table new_table = True for table_name in _existing_tables: if equal_tables(tbl, _existing_tables[table_name]): name = table_name tbl = _existing_tables[name] new_table = False break if new_table: _existing_tables[name] = tbl cell_offset = 0 if use_sum_factorization and (not quadrature_rule.has_tensor_factors): raise RuntimeError("Sum factorization not available for this quadrature rule.") tensor_factors = None tensor_perm = None if ( use_sum_factorization and element.has_tensor_product_factorisation and len(element.get_tensor_product_representation()) == 1 and quadrature_rule.has_tensor_factors ): factors = element.get_tensor_product_representation() tensor_factors = [] for i, j in enumerate(factors[0]): pts = quadrature_rule.tensor_factors[i][0] d = local_derivatives[i] sub_tbl = j.tabulate(d, pts)[d] sub_tbl = sub_tbl.reshape(1, 1, sub_tbl.shape[0], sub_tbl.shape[1]) for i in all_tensor_factors: if i.values.shape == sub_tbl.shape and np.allclose(i.values, sub_tbl): tensor_factors.append(i) break else: ut = UniqueTableReferenceT( f"FE_TF{tensor_n}", sub_tbl, None, None, None, False, False, False, False, None, None, ) all_tensor_factors.append(ut) tensor_factors.append(ut) mt_tables[ut.name] = ut tensor_n += 1 tensor_perm = factors[0][1] if mt.restriction == "-" and isinstance(mt.terminal, 
ufl.classes.FormArgument): # offset = 0 or number of element dofs, if restricted to "-" cell_offset = element.dim offset = cell_offset + t["offset"] block_size = t["stride"] # tables is just np.arrays, mt_tables hold metadata too mt_tables[mt] = UniqueTableReferenceT( name, tbl, offset, block_size, tabletype, tabletype in piecewise_ttypes, tabletype in uniform_ttypes, is_permuted, tensor_factors is not None, tensor_factors, tensor_perm, ) return mt_tables def is_zeros_table(table, rtol=default_rtol, atol=default_atol): """Check if table values are all zero.""" return np.prod(table.shape) == 0 or np.allclose( table, np.zeros(table.shape), rtol=rtol, atol=atol ) def is_ones_table(table, rtol=default_rtol, atol=default_atol): """Check if table values are all one.""" return np.allclose(table, np.ones(table.shape), rtol=rtol, atol=atol) def is_quadrature_table(table, rtol=default_rtol, atol=default_atol): """Check if table is a quadrature table.""" _, num_entities, num_points, num_dofs = table.shape Id = np.eye(num_points) return num_points == num_dofs and all( np.allclose(table[0, i, :, :], Id, rtol=rtol, atol=atol) for i in range(num_entities) ) def is_permuted_table(table, rtol=default_rtol, atol=default_atol): """Check if table is permuted.""" return not all( np.allclose(table[0, :, :, :], table[i, :, :, :], rtol=rtol, atol=atol) for i in range(1, table.shape[0]) ) def is_piecewise_table(table, rtol=default_rtol, atol=default_atol): """Check if table is piecewise.""" return all( np.allclose(table[0, :, 0, :], table[0, :, i, :], rtol=rtol, atol=atol) for i in range(1, table.shape[2]) ) def is_uniform_table(table, rtol=default_rtol, atol=default_atol): """Check if table is uniform.""" return all( np.allclose(table[0, 0, :, :], table[0, i, :, :], rtol=rtol, atol=atol) for i in range(1, table.shape[1]) ) def analyse_table_type(table, rtol=default_rtol, atol=default_atol): """Analyse table type.""" if is_zeros_table(table, rtol=rtol, atol=atol): # Table is empty or all values are 0.0 ttype = "zeros" elif is_ones_table(table, rtol=rtol, atol=atol): # All values are 1.0 ttype = "ones" elif is_quadrature_table(table, rtol=rtol, atol=atol): # Identity matrix mapping points to dofs (separately on each entity) ttype = "quadrature" else: # Equal for all points on a given entity piecewise = is_piecewise_table(table, rtol=rtol, atol=atol) uniform = is_uniform_table(table, rtol=rtol, atol=atol) if piecewise and uniform: # Constant for all points and all entities ttype = "fixed" elif piecewise: # Constant for all points on each entity separately ttype = "piecewise" elif uniform: # Equal on all entities ttype = "uniform" else: # Varying over points and entities ttype = "varying" return ttype ffcx-0.9.0/ffcx/ir/integral.py000066400000000000000000000340361470142666300162040ustar00rootroot00000000000000# Copyright (C) 2013-2020 Martin Sandve Alnæs and Michal Habera # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Main algorithm for building the integral intermediate representation.""" import collections import itertools import logging import typing import numpy as np import ufl from ufl.algorithms.balancing import balance_modifiers from ufl.checks import is_cellwise_constant from ufl.classes import QuadratureWeight from ffcx.ir.analysis.factorization import compute_argument_factorization from ffcx.ir.analysis.graph import build_scalar_graph from ffcx.ir.analysis.modified_terminals import analyse_modified_terminal, is_modified_terminal from 
ffcx.ir.analysis.visualise import visualise_graph from ffcx.ir.elementtables import UniqueTableReferenceT, build_optimized_tables logger = logging.getLogger("ffcx") class ModifiedArgumentDataT(typing.NamedTuple): """Modified argument data.""" ma_index: int tabledata: UniqueTableReferenceT class BlockDataT(typing.NamedTuple): """Block data.""" ttypes: tuple[str, ...] # list of table types for each block rank factor_indices_comp_indices: list[tuple[int, int]] # list of (factor index, component index) all_factors_piecewise: bool # True if all factors for this block are piecewise unames: tuple[str, ...] # list of unique FE table names for each block rank restrictions: tuple[str, ...] # restriction "+" | "-" | None for each block rank transposed: bool # block is the transpose of another is_uniform: bool ma_data: tuple[ModifiedArgumentDataT, ...] # used in "full", "safe" and "partial" is_permuted: bool # Do quad points on facets need to be permuted? def compute_integral_ir(cell, integral_type, entity_type, integrands, argument_shape, p, visualise): """Compute intermediate representation for an integral.""" # The intermediate representation dict we're building and returning # here ir = {} # Shared unique tables for all quadrature loops ir["unique_tables"] = {} ir["unique_table_types"] = {} ir["integrand"] = {} for quadrature_rule, integrand in integrands.items(): expression = integrand # Rebalance order of nested terminal modifiers expression = balance_modifiers(expression) # Remove QuadratureWeight terminals from expression and replace with 1.0 expression = replace_quadratureweight(expression) # Build initial scalar list-based graph representation S = build_scalar_graph(expression) # Build terminal_data from V here before factorization. Then we # can use it to derive table properties for all modified # terminals, and then use that to rebuild the scalar graph more # efficiently before argument factorization. We can build # terminal_data again after factorization if that's necessary. 
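        # A hedged sketch of the structure built next (node indices are
        # hypothetical): for an integrand such as inner(grad(u), grad(v)),
        # the scalar graph S contains modified-terminal nodes, and
        # initial_terminals maps each such node index to its analysed
        # ModifiedTerminal data, e.g.
        #
        #     initial_terminals = {
        #         3: analyse_modified_terminal(grad_u_component),
        #         7: analyse_modified_terminal(grad_v_component),
        #     }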
initial_terminals = { i: analyse_modified_terminal(v["expression"]) for i, v in S.nodes.items() if is_modified_terminal(v["expression"]) } # Check if we have a mixed-dimensional integral is_mixed_dim = False for domain in ufl.domain.extract_domains(integrand): if domain.topological_dimension() != cell.topological_dimension(): is_mixed_dim = True mt_table_reference = build_optimized_tables( quadrature_rule, cell, integral_type, entity_type, initial_terminals.values(), ir["unique_tables"], use_sum_factorization=p["sum_factorization"], is_mixed_dim=is_mixed_dim, rtol=p["table_rtol"], atol=p["table_atol"], ) # Fetch unique tables for this quadrature rule table_types = {v.name: v.ttype for v in mt_table_reference.values()} tables = {v.name: v.values for v in mt_table_reference.values()} S_targets = [i for i, v in S.nodes.items() if v.get("target", False)] num_components = np.int32(np.prod(expression.ufl_shape)) if "zeros" in table_types.values(): # If there are any 'zero' tables, replace symbolically and rebuild graph for i, mt in initial_terminals.items(): # Set modified terminals with zero tables to zero tr = mt_table_reference.get(mt) if tr is not None and tr.ttype == "zeros": S.nodes[i]["expression"] = ufl.as_ufl(0.0) # Propagate expression changes using dependency list for i, v in S.nodes.items(): deps = [S.nodes[j]["expression"] for j in S.out_edges[i]] if deps: v["expression"] = v["expression"]._ufl_expr_reconstruct_(*deps) # Recreate expression with correct ufl_shape expressions = [ None, ] * num_components for target in S_targets: for comp in S.nodes[target]["component"]: assert expressions[comp] is None expressions[comp] = S.nodes[target]["expression"] expression = ufl.as_tensor(np.reshape(expressions, expression.ufl_shape)) # Rebuild scalar list-based graph representation S = build_scalar_graph(expression) # Output diagnostic graph as pdf if visualise: visualise_graph(S, "S.pdf") # Compute factorization of arguments rank = len(argument_shape) F = compute_argument_factorization(S, rank) # Get the 'target' nodes that are factors of arguments, and insert in dict FV_targets = [i for i, v in F.nodes.items() if v.get("target", False)] argument_factorization = {} for fi in FV_targets: # Number of blocks using this factor must agree with number of components # to which this factor contributes. I.e. 
there are more blocks iff there are more # components assert len(F.nodes[fi]["target"]) == len(F.nodes[fi]["component"]) k = 0 for w in F.nodes[fi]["target"]: comp = F.nodes[fi]["component"][k] argument_factorization[w] = argument_factorization.get(w, []) # Store tuple of (factor index, component index) argument_factorization[w].append((fi, comp)) k += 1 # Get list of indices in F which are the arguments (should be at start) argkeys = set() for w in argument_factorization: argkeys = argkeys | set(w) argkeys = list(argkeys) # Build set of modified_terminals for each mt factorized vertex in F # and attach tables, if appropriate for i, v in F.nodes.items(): expr = v["expression"] if is_modified_terminal(expr): mt = analyse_modified_terminal(expr) F.nodes[i]["mt"] = mt tr = mt_table_reference.get(mt) if tr is not None: F.nodes[i]["tr"] = tr # Attach 'status' to each node: 'inactive', 'piecewise' or 'varying' analyse_dependencies(F, mt_table_reference) # Output diagnostic graph as pdf if visualise: visualise_graph(F, "F.pdf") # Loop over factorization terms block_contributions = collections.defaultdict(list) for ma_indices, fi_ci in sorted(argument_factorization.items()): # Get a bunch of information about this term assert rank == len(ma_indices) trs = tuple(F.nodes[ai]["tr"] for ai in ma_indices) unames = tuple(tr.name for tr in trs) ttypes = tuple(tr.ttype for tr in trs) assert not any(tt == "zeros" for tt in ttypes) blockmap = [] for tr in trs: begin = tr.offset num_dofs = tr.values.shape[3] dofmap = tuple(begin + i * tr.block_size for i in range(num_dofs)) blockmap.append(dofmap) blockmap = tuple(blockmap) block_is_uniform = all(tr.is_uniform for tr in trs) # Collect relevant restrictions to identify blocks correctly # in interior facet integrals block_restrictions = [] for i, ai in enumerate(ma_indices): if trs[i].is_uniform: r = None else: r = F.nodes[ai]["mt"].restriction block_restrictions.append(r) block_restrictions = tuple(block_restrictions) # Check whether each factor corresponding to this argument is piecewise all_factors_piecewise = all(F.nodes[ifi[0]]["status"] == "piecewise" for ifi in fi_ci) block_is_permuted = False for name in unames: if tables[name].shape[0] > 1: block_is_permuted = True ma_data = [] for i, ma in enumerate(ma_indices): ma_data.append(ModifiedArgumentDataT(ma, trs[i])) block_is_transposed = False # FIXME: Handle transposes for these block types block_unames = unames blockdata = BlockDataT( ttypes, fi_ci, all_factors_piecewise, block_unames, block_restrictions, block_is_transposed, block_is_uniform, tuple(ma_data), block_is_permuted, ) # Insert in expr_ir for this quadrature loop block_contributions[blockmap].append(blockdata) # Figure out which table names are referenced active_table_names = set() for i, v in F.nodes.items(): tr = v.get("tr") if tr is not None and F.nodes[i]["status"] != "inactive": if tr.has_tensor_factorisation: for t in tr.tensor_factors: active_table_names.add(t.name) else: active_table_names.add(tr.name) # Figure out which table names are referenced in blocks for blockmap, contributions in itertools.chain(block_contributions.items()): for blockdata in contributions: for mad in blockdata.ma_data: if mad.tabledata.has_tensor_factorisation: for t in mad.tabledata.tensor_factors: active_table_names.add(t.name) else: active_table_names.add(mad.tabledata.name) active_tables = {} active_table_types = {} for name in active_table_names: # Drop tables not referenced from modified terminals if table_types[name] not in ("zeros", "ones"):
active_tables[name] = tables[name] active_table_types[name] = table_types[name] # Add tables and types for this quadrature rule to global tables dict ir["unique_tables"].update(active_tables) ir["unique_table_types"].update(active_table_types) # Build IR dict for the given expressions # Store final ir for this num_points ir["integrand"][quadrature_rule] = { "factorization": F, "modified_arguments": [F.nodes[i]["mt"] for i in argkeys], "block_contributions": block_contributions, } restrictions = [i.restriction for i in initial_terminals.values()] ir["needs_facet_permutations"] = ( "+" in restrictions and "-" in restrictions ) or is_mixed_dim return ir def analyse_dependencies(F, mt_unique_table_reference): """Analyse dependencies. Sets 'status' of all nodes to either: 'inactive', 'piecewise' or 'varying' Children of 'target' nodes are either 'piecewise' or 'varying'. All other nodes are 'inactive'. Varying nodes are identified by their tables ('tr'). All their parent nodes are also set to 'varying' - any remaining active nodes are 'piecewise'. """ # Set targets, and dependencies to 'active' targets = [i for i, v in F.nodes.items() if v.get("target")] for _, v in F.nodes.items(): v["status"] = "inactive" while targets: s = targets.pop() F.nodes[s]["status"] = "active" for j in F.out_edges[s]: if F.nodes[j]["status"] == "inactive": targets.append(j) # Build piecewise/varying markers for factorized_vertices varying_ttypes = ("varying", "quadrature", "uniform") varying_indices = [] for i, v in F.nodes.items(): if v.get("mt") is None: continue tr = v.get("tr") if tr is not None: ttype = tr.ttype # Check if table computations have revealed values varying over points if ttype in varying_ttypes: varying_indices.append(i) else: if ttype not in ("fixed", "piecewise", "ones", "zeros"): raise RuntimeError(f"Invalid ttype {ttype}.") elif not is_cellwise_constant(v["expression"]): raise RuntimeError("Error " + str(tr)) # Keeping this check to be on the safe side, # not sure which cases this will cover (if any) # varying_indices.append(i) # Set all parents of active varying nodes to 'varying' while varying_indices: s = varying_indices.pop() if F.nodes[s]["status"] == "active": F.nodes[s]["status"] = "varying" for j in F.in_edges[s]: varying_indices.append(j) # Any remaining active nodes must be 'piecewise' for _, v in F.nodes.items(): if v["status"] == "active": v["status"] = "piecewise" def replace_quadratureweight(expression): """Remove any QuadratureWeight terminals and replace with 1.0.""" r = [] for node in ufl.corealg.traversal.unique_pre_traversal(expression): if is_modified_terminal(node) and isinstance(node, QuadratureWeight): r.append(node) replace_map = {q: 1.0 for q in r} return ufl.algorithms.replace(expression, replace_map) ffcx-0.9.0/ffcx/ir/representation.py000066400000000000000000000520271470142666300174410ustar00rootroot00000000000000# Copyright (C) 2009-2020 Anders Logg, Martin Sandve Alnæs, Marie E. Rognes, # Kristian B. Oelgaard, Matthew W. Scroggs, Chris Richardson, and others # # This file is part of FFCx. (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Compiler stage 2: Code representation. Module computes intermediate representations of forms. For each UFC function, we extract the data needed for code generation at a later stage. The representation should conform strictly to the naming and order of functions in UFC. 
Thus, for code generation of the function "foo", one should only need to use the data stored in the intermediate representation under the key "foo". """ from __future__ import annotations import itertools import logging import typing import warnings import numpy as np import numpy.typing as npt import ufl from ufl.classes import Integral from ufl.sorting import sorted_expr_sum from ffcx import naming from ffcx.analysis import UFLData from ffcx.ir.integral import compute_integral_ir from ffcx.ir.representationutils import QuadratureRule, create_quadrature_points_and_weights logger = logging.getLogger("ffcx") class FormIR(typing.NamedTuple): """Intermediate representation of a form.""" id: int name: str signature: str rank: int num_coefficients: int num_constants: int name_from_uflfile: str original_coefficient_positions: list[int] coefficient_names: list[str] constant_names: list[str] finite_element_hashes: list[int] integral_names: dict[str, list[str]] subdomain_ids: dict[str, list[int]] class QuadratureIR(typing.NamedTuple): """Intermediate representation of a quadrature rule.""" cell_shape: str points: npt.NDArray[np.float64] weights: npt.NDArray[np.float64] class CommonExpressionIR(typing.NamedTuple): """Common-ground for IntegralIR and ExpressionIR.""" integral_type: str entity_type: str tensor_shape: list[int] coefficient_numbering: dict[ufl.Coefficient, int] coefficient_offsets: dict[ufl.Coefficient, int] original_constant_offsets: dict[ufl.Constant, int] unique_tables: dict[str, npt.NDArray[np.float64]] unique_table_types: dict[str, str] integrand: dict[QuadratureRule, dict] name: str needs_facet_permutations: bool shape: list[int] class IntegralIR(typing.NamedTuple): """Intermediate representation of an integral.""" expression: CommonExpressionIR rank: int enabled_coefficients: list[bool] coordinate_element_hash: str class ExpressionIR(typing.NamedTuple): """Intermediate representation of a DOLFINx Expression.""" expression: CommonExpressionIR original_coefficient_positions: list[int] coefficient_names: list[str] constant_names: list[str] name_from_uflfile: str class DataIR(typing.NamedTuple): """Intermediate representation of data.""" integrals: list[IntegralIR] forms: list[FormIR] expressions: list[ExpressionIR] def compute_ir( analysis: UFLData, object_names: dict[int, str], prefix: str, options: dict[str, npt.DTypeLike | int | float], visualise: bool, ) -> DataIR: """Compute intermediate representation.""" logger.info(79 * "*") logger.info("Compiler stage 2: Computing intermediate representation of objects") logger.info(79 * "*") # Compute object names # NOTE: This is done here for performance reasons, because repeated calls # within each IR computation would be expensive due to UFL signature computations finite_element_hashes = {e: e.basix_hash() for e in analysis.unique_elements} integral_names = {} form_names = {} for fd_index, fd in enumerate(analysis.form_data): form_names[fd_index] = naming.form_name(fd.original_form, fd_index, prefix) for itg_index, itg_data in enumerate(fd.integral_data): integral_names[(fd_index, itg_index)] = naming.integral_name( fd.original_form, itg_data.integral_type, fd_index, itg_data.subdomain_id, prefix ) irs = [ _compute_integral_ir( fd, i, analysis.element_numbers, integral_names, finite_element_hashes, options, visualise, ) for (i, fd) in enumerate(analysis.form_data) ] ir_integrals = list(itertools.chain(*irs)) ir_forms = [ _compute_form_ir( fd, i, prefix, form_names, integral_names, analysis.element_numbers, finite_element_hashes, 
object_names, ) for (i, fd) in enumerate(analysis.form_data) ] ir_expressions = [ _compute_expression_ir( expr, i, prefix, analysis, options, visualise, object_names, finite_element_hashes, ) for i, expr in enumerate(analysis.expressions) ] return DataIR( integrals=ir_integrals, forms=ir_forms, expressions=ir_expressions, ) def _compute_integral_ir( form_data, form_index, element_numbers, integral_names, finite_element_hashes, options, visualise, ) -> list[IntegralIR]: """Compute intermediate representation for form integrals.""" _entity_types = { "cell": "cell", "exterior_facet": "facet", "interior_facet": "facet", "vertex": "vertex", "custom": "cell", } # Iterate over groups of integrals irs = [] for itg_data_index, itg_data in enumerate(form_data.integral_data): logger.info(f"Computing IR for integral in integral group {itg_data_index}") expression_ir = {} # Compute representation entity_type = _entity_types[itg_data.integral_type] cell = itg_data.domain.ufl_cell() cellname = cell.cellname() tdim = cell.topological_dimension() assert all(tdim == itg.ufl_domain().topological_dimension() for itg in itg_data.integrals) expression_ir = { "integral_type": itg_data.integral_type, "entity_type": entity_type, "shape": (), } ir = { "rank": form_data.rank, "enabled_coefficients": itg_data.enabled_coefficients, "coordinate_element_hash": finite_element_hashes[ itg_data.domain.ufl_coordinate_element() ], } # Get element space dimensions unique_elements = element_numbers.keys() element_dimensions = { element: element.dim + element.num_global_support_dofs for element in unique_elements } # Create dimensions of primary indices, needed to reset the argument # 'A' given to tabulate_tensor() by the assembler. argument_dimensions = [ element_dimensions[element] for element in form_data.argument_elements ] # Compute shape of element tensor if expression_ir["integral_type"] == "interior_facet": expression_ir["tensor_shape"] = [2 * dim for dim in argument_dimensions] else: expression_ir["tensor_shape"] = argument_dimensions integral_type = itg_data.integral_type cell = itg_data.domain.ufl_cell() # Group integrands with the same quadrature rule grouped_integrands: dict[QuadratureRule, list[ufl.core.expr.Expr]] = {} use_sum_factorization = options["sum_factorization"] and itg_data.integral_type == "cell" for integral in itg_data.integrals: md = integral.metadata() or {} scheme = md["quadrature_rule"] tensor_factors = None if scheme == "custom": points = md["quadrature_points"] weights = md["quadrature_weights"] elif scheme == "vertex": # FIXME: Could this come from basix? # The vertex scheme, i.e., averaging the function value in the # vertices and multiplying with the simplex volume, is only of # order 1 and inferior to other generic schemes in terms of # error reduction. Equation systems generated with the vertex # scheme have some properties that other schemes lack, e.g., the # mass matrix is a simple diagonal matrix. This may be # prescribed in certain cases. degree = md["quadrature_degree"] if integral_type != "cell": facet_types = cell.facet_types() assert len(facet_types) == 1 cellname = facet_types[0].cellname() if degree > 1: warnings.warn( "Explicitly selected vertex quadrature (degree 1), " f"but requested degree is {degree}." 
) if cellname == "tetrahedron": points, weights = ( np.array( [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]] ), np.array([1.0 / 24.0, 1.0 / 24.0, 1.0 / 24.0, 1.0 / 24.0]), ) elif cellname == "triangle": points, weights = ( np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]]), np.array([1.0 / 6.0, 1.0 / 6.0, 1.0 / 6.0]), ) elif cellname == "interval": # Trapezoidal rule points, weights = (np.array([[0.0], [1.0]]), np.array([1.0 / 2.0, 1.0 / 2.0])) elif cellname == "quadrilateral": points, weights = ( np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]]), np.array([1.0 / 4.0, 1.0 / 4.0, 1.0 / 4.0, 1.0 / 4.0]), ) elif cellname == "hexahedron": points, weights = ( np.array( [ [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [1.0, 1.0, 1.0], ] ), np.array( [ 1.0 / 8.0, 1.0 / 8.0, 1.0 / 8.0, 1.0 / 8.0, 1.0 / 8.0, 1.0 / 8.0, 1.0 / 8.0, 1.0 / 8.0, ] ), ) else: raise RuntimeError(f"Vertex scheme is not supported for cell: {cellname}") else: degree = md["quadrature_degree"] points, weights, tensor_factors = create_quadrature_points_and_weights( integral_type, cell, degree, scheme, form_data.argument_elements, use_sum_factorization, ) points = np.asarray(points) weights = np.asarray(weights) rule = QuadratureRule(points, weights, tensor_factors) if rule not in grouped_integrands: grouped_integrands[rule] = [] grouped_integrands[rule].append(integral.integrand()) sorted_integrals: dict[QuadratureRule, Integral] = {} for rule, integrands in grouped_integrands.items(): integrands_summed = sorted_expr_sum(integrands) integral_new = Integral( integrands_summed, itg_data.integral_type, itg_data.domain, itg_data.subdomain_id, {}, None, ) sorted_integrals[rule] = integral_new # TODO: See if coefficient_numbering can be removed # Build coefficient numbering for UFC interface here, to avoid # renumbering in UFL and application of replace mapping coefficient_numbering = {} for i, f in enumerate(form_data.reduced_coefficients): coefficient_numbering[f] = i # Add coefficient numbering to IR expression_ir["coefficient_numbering"] = coefficient_numbering index_to_coeff = sorted([(v, k) for k, v in coefficient_numbering.items()]) offsets = {} width = 2 if integral_type == "interior_facet" else 1 _offset = 0 for k, el in zip(index_to_coeff, form_data.coefficient_elements): offsets[k[1]] = _offset _offset += width * element_dimensions[el] # Copy offsets also into IR expression_ir["coefficient_offsets"] = offsets # Build offsets for Constants original_constant_offsets = {} _offset = 0 for constant in form_data.original_form.constants(): original_constant_offsets[constant] = _offset _offset += np.prod(constant.ufl_shape, dtype=int) expression_ir["original_constant_offsets"] = original_constant_offsets # Create map from number of quadrature points -> integrand integrand_map: dict[QuadratureRule, ufl.core.expr.Expr] = { rule: integral.integrand() for rule, integral in sorted_integrals.items() } # Build more specific intermediate representation integral_ir = compute_integral_ir( itg_data.domain.ufl_cell(), itg_data.integral_type, expression_ir["entity_type"], integrand_map, expression_ir["tensor_shape"], options, visualise, ) expression_ir.update(integral_ir) # Fetch name expression_ir["name"] = integral_names[(form_index, itg_data_index)] ir["expression"] = CommonExpressionIR(**expression_ir) irs.append(IntegralIR(**ir)) return irs def _compute_form_ir( form_data, form_id, prefix, form_names, integral_names, element_numbers,
finite_element_hashes, object_names, ) -> FormIR: """Compute intermediate representation of form.""" logger.info(f"Computing IR for form {form_id}") # Store id ir = {"id": form_id} # Compute common data ir["name"] = form_names[form_id] ir["signature"] = form_data.original_form.signature() ir["rank"] = len(form_data.original_form.arguments()) ir["num_coefficients"] = len(form_data.reduced_coefficients) ir["num_constants"] = len(form_data.original_form.constants()) ir["coefficient_names"] = [ object_names.get(id(obj), f"w{j}") for j, obj in enumerate(form_data.reduced_coefficients) ] ir["constant_names"] = [ object_names.get(id(obj), f"c{j}") for j, obj in enumerate(form_data.original_form.constants()) ] ir["original_coefficient_positions"] = form_data.original_coefficient_positions ir["finite_element_hashes"] = [ finite_element_hashes[e] for e in form_data.argument_elements + form_data.coefficient_elements ] form_name = object_names.get(id(form_data.original_form), form_id) ir["name_from_uflfile"] = f"form_{prefix}_{form_name}" # Store names of integrals and subdomain_ids for this form, grouped # by integral types since form points to all integrals it contains, # it has to know their names for codegen phase ir["integral_names"] = {} ir["subdomain_ids"] = {} ufcx_integral_types = ("cell", "exterior_facet", "interior_facet") ir["subdomain_ids"] = {itg_type: [] for itg_type in ufcx_integral_types} ir["integral_names"] = {itg_type: [] for itg_type in ufcx_integral_types} for itg_index, itg_data in enumerate(form_data.integral_data): # UFL is using "otherwise" for default integrals (over whole mesh) # but FFCx needs integers, so otherwise = -1 integral_type = itg_data.integral_type subdomain_ids = [sid if sid != "otherwise" else -1 for sid in itg_data.subdomain_id] if min(subdomain_ids) < -1: raise ValueError("Integral subdomain IDs must be non-negative.") ir["subdomain_ids"][integral_type] += subdomain_ids for _ in range(len(subdomain_ids)): ir["integral_names"][integral_type] += [integral_names[(form_id, itg_index)]] return FormIR(**ir) def _compute_expression_ir( expression, index, prefix, analysis, options, visualise, object_names, finite_element_hashes, ): """Compute intermediate representation of expression.""" logger.info(f"Computing IR for expression {index}") # Compute representation ir = {} base_ir = {} original_expression = (expression[2], expression[1]) base_ir["name"] = naming.expression_name(original_expression, prefix) original_expression = expression[2] points = expression[1] expression = expression[0] try: cell = ufl.domain.extract_unique_domain(expression).ufl_cell() except AttributeError: # This case corresponds to a spatially constant expression # without any dependencies cell = None # Prepare dimensions of all unique element in expression, including # elements for arguments, coefficients and coordinate mappings element_dimensions = { element: element.dim + element.num_global_support_dofs for element in analysis.unique_elements } # Extract dimensions for elements of arguments only arguments = ufl.algorithms.extract_arguments(expression) argument_elements = tuple(f.ufl_function_space().ufl_element() for f in arguments) argument_dimensions = [element_dimensions[element] for element in argument_elements] tensor_shape = argument_dimensions base_ir["tensor_shape"] = tensor_shape base_ir["shape"] = list(expression.ufl_shape) coefficients = ufl.algorithms.extract_coefficients(expression) coefficient_numbering = {} for i, coeff in enumerate(coefficients): coefficient_numbering[coeff] 
= i # Add coefficient numbering to IR base_ir["coefficient_numbering"] = coefficient_numbering original_coefficient_positions = [] original_coefficients = ufl.algorithms.extract_coefficients(original_expression) for coeff in coefficients: original_coefficient_positions.append(original_coefficients.index(coeff)) ir["coefficient_names"] = [ object_names.get(id(obj), f"w{j}") for j, obj in enumerate(coefficients) ] ir["constant_names"] = [ object_names.get(id(obj), f"c{j}") for j, obj in enumerate(ufl.algorithms.analysis.extract_constants(expression)) ] expression_name = object_names.get(id(original_expression), index) ir["name_from_uflfile"] = f"expression_{prefix}_{expression_name}" if len(argument_elements) > 1: raise RuntimeError("Expression with more than one Argument not implemented.") ir["original_coefficient_positions"] = original_coefficient_positions coefficient_elements = tuple(f.ufl_element() for f in coefficients) offsets = {} _offset = 0 for i, el in enumerate(coefficient_elements): offsets[coefficients[i]] = _offset _offset += element_dimensions[el] # Copy offsets also into IR base_ir["coefficient_offsets"] = offsets base_ir["integral_type"] = "expression" if cell is not None: if (tdim := cell.topological_dimension()) == (pdim := points.shape[1]): base_ir["entity_type"] = "cell" elif tdim - 1 == pdim: base_ir["entity_type"] = "facet" else: raise ValueError( f"Expression on domain with topological dimension {tdim} " + f"with points of dimension {pdim} not supported." ) else: # For spatially invariant expressions, all expressions are evaluated in the cell base_ir["entity_type"] = "cell" # Build offsets for Constants original_constant_offsets = {} _offset = 0 for constant in ufl.algorithms.analysis.extract_constants(original_expression): original_constant_offsets[constant] = _offset _offset += np.prod(constant.ufl_shape, dtype=int) base_ir["original_constant_offsets"] = original_constant_offsets weights = np.array([1.0] * points.shape[0]) rule = QuadratureRule(points, weights) integrands = {rule: expression} if cell is None: assert ( len(ir["original_coefficient_positions"]) == 0 and len(base_ir["original_constant_offsets"]) == 0 ) expression_ir = compute_integral_ir( cell, base_ir["integral_type"], base_ir["entity_type"], integrands, tensor_shape, options, visualise, ) base_ir.update(expression_ir) ir["expression"] = CommonExpressionIR(**base_ir) return ExpressionIR(**ir) ffcx-0.9.0/ffcx/ir/representationutils.py000066400000000000000000000107621470142666300205200ustar00rootroot00000000000000# Copyright (C) 2012-2017 Marie Rognes # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Utility functions for some code shared between representations.""" import hashlib import itertools import logging import numpy as np import ufl from ffcx.element_interface import create_quadrature, map_facet_points, reference_cell_vertices logger = logging.getLogger("ffcx") class QuadratureRule: """A quadrature rule.""" def __init__(self, points, weights, tensor_factors=None): """Initialise.""" self.points = np.ascontiguousarray(points) # TODO: change basix to make this unnecessary self.weights = weights self.tensor_factors = tensor_factors self.has_tensor_factors = tensor_factors is not None self._hash = None def __hash__(self): """Hash.""" if self._hash is None: self.hash_obj = hashlib.sha1(self.points) self._hash = int(self.hash_obj.hexdigest(), 32) return self._hash def __eq__(self, other): """Check equality.""" return
np.allclose(self.points, other.points) and np.allclose(self.weights, other.weights) def id(self): """Return unique deterministic identifier. Note: This identifier is used to provide unique names to tables and symbols in generated code. """ return self.hash_obj.hexdigest()[-3:] def create_quadrature_points_and_weights( integral_type, cell, degree, rule, elements, use_tensor_product=False ): """Create quadrature rule and return points and weights.""" pts = None wts = None tensor_factors = None if integral_type == "cell": if cell.cellname() in ["quadrilateral", "hexahedron"] and use_tensor_product: if cell.cellname() == "quadrilateral": tensor_factors = [ create_quadrature("interval", degree, rule, elements) for _ in range(2) ] elif cell.cellname() == "hexahedron": tensor_factors = [ create_quadrature("interval", degree, rule, elements) for _ in range(3) ] pts = np.array( [tuple(i[0] for i in p) for p in itertools.product(*[f[0] for f in tensor_factors])] ) wts = np.array([np.prod(p) for p in itertools.product(*[f[1] for f in tensor_factors])]) else: pts, wts = create_quadrature(cell.cellname(), degree, rule, elements) elif integral_type in ufl.measure.facet_integral_types: facet_types = cell.facet_types() # Raise exception for cells with more than one facet type e.g. prisms if len(facet_types) > 1: raise Exception(f"Cell type {cell} not supported for integral type {integral_type}.") pts, wts = create_quadrature(facet_types[0].cellname(), degree, rule, elements) elif integral_type in ufl.measure.point_integral_types: pts, wts = create_quadrature("vertex", degree, rule, elements) elif integral_type == "expression": pass else: logging.exception(f"Unknown integral type: {integral_type}") return pts, wts, tensor_factors def integral_type_to_entity_dim(integral_type, tdim): """Given integral_type and domain tdim, return the tdim of the integration entity.""" if integral_type == "cell": entity_dim = tdim elif integral_type in ufl.measure.facet_integral_types: entity_dim = tdim - 1 elif integral_type in ufl.measure.point_integral_types: entity_dim = 0 elif integral_type in ufl.custom_integral_types: entity_dim = tdim elif integral_type == "expression": entity_dim = tdim else: raise RuntimeError(f"Unknown integral_type: {integral_type}") return entity_dim def map_integral_points(points, integral_type, cell, entity): """Map points from reference entity to its parent reference cell.""" tdim = cell.topological_dimension() entity_dim = integral_type_to_entity_dim(integral_type, tdim) if entity_dim == tdim: assert points.shape[1] == tdim assert entity == 0 return np.asarray(points) elif entity_dim == tdim - 1: assert points.shape[1] == tdim - 1 return np.asarray(map_facet_points(points, entity, cell.cellname())) elif entity_dim == 0: return np.asarray([reference_cell_vertices(cell.cellname())[entity]]) else: raise RuntimeError(f"Can't map points from entity_dim={entity_dim}") ffcx-0.9.0/ffcx/main.py000066400000000000000000000056501470142666300147110ustar00rootroot00000000000000# Copyright (C) 2004-2020 Anders Logg, Garth N. Wells and Michal Habera # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Command-line interface to FFCx. Parse command-line arguments and generate code from input UFL form files. 
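Example invocation (the file name is hypothetical):

    ffcx --scalar_type float32 -o generated/ poisson.py

This compiles the forms and expressions defined in poisson.py and writes
the generated C header and source files into generated/.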
""" import argparse import cProfile import logging import pathlib import re import string import ufl from ffcx import __version__ as FFCX_VERSION from ffcx import compiler, formatting from ffcx.options import FFCX_DEFAULT_OPTIONS, get_options logger = logging.getLogger("ffcx") parser = argparse.ArgumentParser( description="FEniCS Form Compiler (FFCx, https://fenicsproject.org)" ) parser.add_argument("--version", action="version", version=f"%(prog)s (version {FFCX_VERSION})") parser.add_argument("-o", "--output-directory", type=str, default=".", help="output directory") parser.add_argument("--visualise", action="store_true", help="visualise the IR graph") parser.add_argument("-p", "--profile", action="store_true", help="enable profiling") # Add all options from FFCx option system for opt_name, (arg_type, opt_val, opt_desc, choices) in FFCX_DEFAULT_OPTIONS.items(): if isinstance(opt_val, bool): parser.add_argument( f"--{opt_name}", action="store_true", help=f"{opt_desc} (default={opt_val})" ) else: parser.add_argument( f"--{opt_name}", type=arg_type, choices=choices, help=f"{opt_desc} (default={opt_val})" ) parser.add_argument("ufl_file", nargs="+", help="UFL file(s) to be compiled") def main(args=None): """Run ffcx on a UFL file.""" xargs = parser.parse_args(args) # Parse all other options priority_options = {k: v for k, v in xargs.__dict__.items() if v is not None} options = get_options(priority_options) # Call parser and compiler for each file for filename in xargs.ufl_file: file = pathlib.Path(filename) # Remove weird characters (file system allows more than the C # preprocessor) prefix = file.stem prefix = re.subn("[^{}]".format(string.ascii_letters + string.digits + "_"), "!", prefix)[0] prefix = re.subn("!+", "_", prefix)[0] # Turn on profiling if xargs.profile: pr = cProfile.Profile() pr.enable() # Load UFL file ufd = ufl.algorithms.load_ufl_file(filename) # Generate code code_h, code_c = compiler.compile_ufl_objects( ufd.forms + ufd.expressions + ufd.elements, options=options, object_names=ufd.object_names, prefix=prefix, visualise=xargs.visualise, ) # Write to file formatting.write_code(code_h, code_c, prefix, xargs.output_directory) # Turn off profiling and write status to file if xargs.profile: pr.disable() pfn = f"ffcx_{prefix}.profile" pr.dump_stats(pfn) return 0 ffcx-0.9.0/ffcx/naming.py000066400000000000000000000067241470142666300152410ustar00rootroot00000000000000# Copyright (C) 2009-2020 Anders Logg and Michal Habera # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Naming.""" from __future__ import annotations import hashlib import numpy as np import numpy.typing as npt import ufl import ffcx import ffcx.codegeneration def compute_signature( ufl_objects: list[ufl.Form | tuple[ufl.core.expr.Expr, npt.NDArray[np.float64]]], tag: str, ) -> str: """Compute the signature hash. Based on the UFL type of the objects and an additional optional 'tag'. 
""" object_signature = "" for ufl_object in ufl_objects: # Get signature from ufl object if isinstance(ufl_object, ufl.Form): kind = "form" object_signature += ufl_object.signature() elif isinstance(ufl_object, tuple) and isinstance(ufl_object[0], ufl.core.expr.Expr): expr = ufl_object[0] points = ufl_object[1] # FIXME Move this to UFL, cache the computation coeffs = ufl.algorithms.extract_coefficients(expr) consts = ufl.algorithms.analysis.extract_constants(expr) args = ufl.algorithms.analysis.extract_arguments(expr) rn = dict() rn.update(dict((c, i) for i, c in enumerate(coeffs))) rn.update(dict((c, i) for i, c in enumerate(consts))) rn.update(dict((c, i) for i, c in enumerate(args))) domains: list[ufl.Mesh] = [] for coeff in coeffs: domains.append(*ufl.domain.extract_domains(coeff)) for arg in args: domains.append(*ufl.domain.extract_domains(arg)) for gc in ufl.algorithms.analysis.extract_type(expr, ufl.classes.GeometricQuantity): domains.append(*ufl.domain.extract_domains(gc)) for const in consts: domains.append(*ufl.domain.extract_domains(const)) domains = ufl.algorithms.analysis.unique_tuple(domains) rn.update(dict((d, i) for i, d in enumerate(domains))) # Hash on UFL signature and points signature = ufl.algorithms.signature.compute_expression_signature(expr, rn) object_signature += signature object_signature += repr(points) kind = "expression" else: raise RuntimeError(f"Unknown ufl object type {ufl_object.__class__.__name__}") # Build combined signature signatures = [ object_signature, str(ffcx.__version__), ffcx.codegeneration.get_signature(), kind, tag, ] string = ";".join(signatures) return hashlib.sha1(string.encode("utf-8")).hexdigest() def integral_name( original_form: ufl.form.Form, integral_type: str, form_id: int, subdomain_id: tuple[int, ...] | tuple[str], prefix: str, ) -> str: """Get integral name.""" sig = compute_signature([original_form], str((prefix, integral_type, form_id, subdomain_id))) return f"integral_{sig}" def form_name(original_form: ufl.form.Form, form_id: int, prefix: str) -> str: """Get form name.""" sig = compute_signature([original_form], str((prefix, form_id))) return f"form_{sig}" def expression_name( expression: tuple[ufl.core.expr.Expr, npt.NDArray[np.floating]], prefix: str ) -> str: """Get expression name.""" assert isinstance(expression[0], ufl.core.expr.Expr) sig = compute_signature([expression], prefix) return f"expression_{sig}" ffcx-0.9.0/ffcx/options.py000066400000000000000000000066601470142666300154620ustar00rootroot00000000000000# Copyright (C) 2005-2020 Anders Logg, Michal Habera, Jack S. Hale # # This file is part of FFCx. 
(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Options.""" from __future__ import annotations import functools import json import logging import os import os.path import pprint from pathlib import Path import numpy.typing as npt logger = logging.getLogger("ffcx") FFCX_DEFAULT_OPTIONS = { "epsilon": (float, 1e-14, "machine precision, used for dropping zero terms in tables.", None), "scalar_type": ( str, "float64", "scalar type to use in generated code.", ("float32", "float64", "complex64", "complex128"), ), "sum_factorization": (bool, False, "use sum factorization.", None), "table_rtol": ( float, 1e-6, "relative precision to use when comparing finite element table values for reuse.", None, ), "table_atol": ( float, 1e-9, "absolute precision to use when comparing finite element table values reuse.", None, ), "verbosity": ( int, 30, "logger verbosity, follows standard library levels, i.e. INFO=20, DEBUG=10, etc.", None, ), } @functools.cache def _load_options() -> tuple[dict, dict]: """Load options from JSON files.""" user_config_file = os.getenv("XDG_CONFIG_HOME", default=Path.home().joinpath(".config")) / Path( "ffcx", "ffcx_options.json" ) try: with open(user_config_file) as f: user_options = json.load(f) except FileNotFoundError: user_options = {} pwd_config_file = Path.cwd().joinpath("ffcx_options.json") try: with open(pwd_config_file) as f: pwd_options = json.load(f) except FileNotFoundError: pwd_options = {} return (user_options, pwd_options) def get_options( priority_options: dict[str, npt.DTypeLike | int | float] | None = None, ) -> dict[str, int | float | npt.DTypeLike]: """Return (a copy of) the merged option values for FFCX. Args: priority_options: take priority over all other option values (see notes) Returns: merged option values Note: This function sets the log level from the merged option values prior to returning. The `ffcx_options.json` files are cached on the first call. Subsequent calls to this function use this cache. Priority ordering of options from highest to lowest is: - **priority_options** (API and command line options) - **$PWD/ffcx_options.json** (local options) - **$XDG_CONFIG_HOME/ffcx/ffcx_options.json** (user options) - **FFCX_DEFAULT_OPTIONS** in `ffcx.options` `XDG_CONFIG_HOME` is `~/.config/` if the environment variable is not set. 
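    A minimal call from Python (the override shown is illustrative):

        from ffcx.options import get_options

        options = get_options({"scalar_type": "float32"})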
Example `ffcx_options.json` file: { "epsilon": 1e-7 } """ options: dict[str, npt.DTypeLike | int | float] = {} for opt, (_, value, _, _) in FFCX_DEFAULT_OPTIONS.items(): options[opt] = value # type: ignore # NOTE: _load_options uses functools.lru_cache user_options, pwd_options = _load_options() options.update(user_options) options.update(pwd_options) if priority_options is not None: options.update(priority_options) logger.setLevel(int(options["verbosity"])) # type: ignore logger.info("Final option values") logger.info(pprint.pformat(options)) return options ffcx-0.9.0/pyproject.toml000066400000000000000000000053351470142666300154010ustar00rootroot00000000000000[build-system] requires = ["setuptools>=62", "wheel"] build-backend = "setuptools.build_meta" [project] name = "fenics-ffcx" version = "0.9.0" description = "The FEniCSx Form Compiler" readme = "README.md" requires-python = ">=3.9" license = { file = "LICENSE" } authors = [ { email = "fenics-steering-council@googlegroups.com" }, { name = "FEniCS Steering Council" }, ] dependencies = [ "numpy>=1.21", "cffi", "setuptools; python_version >= '3.12'", # cffi with compilation support requires setuptools "fenics-basix >= 0.9.0, <0.10.0", "fenics-ufl >= 2024.2.0, <2024.3.0", ] [project.urls] homepage = "https://fenicsproject.org" repository = "https://github.com/fenics/ffcx.git" documentation = "https://docs.fenicsproject.org" [project.scripts] ffcx = "ffcx:__main__.main" [project.optional-dependencies] lint = ["ruff"] docs = ["sphinx", "sphinx_rtd_theme"] optional = ["numba", "pygraphviz==1.7"] test = ["pytest >= 6.0", "sympy", "numba"] ci = [ "coveralls", "coverage", "pytest-cov", "pytest-xdist", "types-setuptools", "mypy", "fenics-ffcx[docs]", "fenics-ffcx[lint]", "fenics-ffcx[test]", ] [tool.setuptools] packages = [ "ffcx", "ffcx.codegeneration", "ffcx.codegeneration.C", "ffcx.ir", "ffcx.ir.analysis", ] [tool.pytest.ini_options] minversion = "6.0" addopts = "-ra" testpaths = ["test"] norecursedirs = ["libs", "docs"] log_cli = true [tool.mypy] # Suggested at https://blog.wolt.com/engineering/2021/09/30/professional-grade-mypy-configuration/ # Goal would be to make all of the below True long-term disallow_untyped_defs = false disallow_any_unimported = false no_implicit_optional = false check_untyped_defs = false warn_return_any = false warn_unused_ignores = false show_error_codes = true [[tool.mypy.overrides]] module = ['basix', 'cffi', 'numba.*', 'pygraphviz', 'ufl.*'] ignore_missing_imports = true [tool.ruff] line-length = 100 indent-width = 4 [tool.ruff.format] docstring-code-format = true [tool.ruff.lint] select = [ # "N", # pep8-naming "E", # pycodestyle "W", # pycodestyle "D", # pydocstyle "F", # pyflakes "I", # isort "RUF", # Ruff-specific rules "UP", # pyupgrade "ICN", # flake8-import-conventions "NPY", # numpy-specific rules "FLY", # use f-string not static joins "LOG", # https://docs.astral.sh/ruff/rules/#flake8-logging-log # "ISC", # https://docs.astral.sh/ruff/rules/#flake8-implicit-str-concat-isc # "B", # https://docs.astral.sh/ruff/rules/#flake8-bugbear-b # "A", # https://docs.astral.sh/ruff/rules/#flake8-builtins-a ] ignore = ["RUF005", "RUF012", "RUF015"] [tool.ruff.lint.per-file-ignores] "test/*" = ["D"] [tool.ruff.lint.pydocstyle] convention = "google" ffcx-0.9.0/test/000077500000000000000000000000001470142666300134365ustar00rootroot00000000000000ffcx-0.9.0/test/Poisson.py000066400000000000000000000025201470142666300154410ustar00rootroot00000000000000# Copyright (C) 2004-2007 Anders Logg # # This file is part of FFCx. 
# # FFCx is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FFCx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with FFCx. If not, see . # """The bilinear form a(u, v) and linear form L(v) for Poisson's equation. Compile this form with FFCx: ffcx Poisson.ufl. """ import basix.ufl from ufl import ( Coefficient, Constant, FunctionSpace, Mesh, TestFunction, TrialFunction, dx, grad, inner, ) mesh = Mesh(basix.ufl.element("P", "triangle", 2, shape=(2,))) e = basix.ufl.element("Lagrange", "triangle", 2) space = FunctionSpace(mesh, e) u = TrialFunction(space) v = TestFunction(space) f = Coefficient(space) kappa1 = Constant(mesh, shape=(2, 2)) kappa2 = Constant(mesh, shape=(2, 2)) a = inner(kappa1, kappa2) * inner(grad(u), grad(v)) * dx L = f * v * dx ffcx-0.9.0/test/conftest.py000066400000000000000000000006211470142666300156340ustar00rootroot00000000000000# Copyright (C) 2020 Michal Habera # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later """Test configuration.""" import sys import pytest @pytest.fixture(scope="module") def compile_args(): """Compiler arguments.""" if sys.platform.startswith("win32"): return ["-Od"] else: return ["-O1", "-Wall", "-Werror"] ffcx-0.9.0/test/test_add_mode.py000066400000000000000000000125341470142666300166100ustar00rootroot00000000000000# Copyright (C) 2019 Chris Richardson # # This file is part of FFCx. 
(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later import sys import basix.ufl import numpy as np import pytest import ufl import ffcx.codegeneration.jit from ffcx.codegeneration.utils import dtype_to_c_type, dtype_to_scalar_dtype @pytest.mark.parametrize( "dtype", [ "float32", "float64", pytest.param( "complex64", marks=pytest.mark.xfail( sys.platform.startswith("win32"), raises=NotImplementedError, reason="missing _Complex", ), ), pytest.param( "complex128", marks=pytest.mark.xfail( sys.platform.startswith("win32"), raises=NotImplementedError, reason="missing _Complex", ), ), ], ) def test_additive_facet_integral(dtype, compile_args): element = basix.ufl.element("Lagrange", "triangle", 1) domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2,))) space = ufl.FunctionSpace(domain, element) u, v = ufl.TrialFunction(space), ufl.TestFunction(space) a = ufl.inner(u, v) * ufl.ds forms = [a] compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms( forms, options={"scalar_type": dtype}, cffi_extra_compile_args=compile_args ) for f, compiled_f in zip(forms, compiled_forms): assert compiled_f.rank == len(f.arguments()) ffi = module.ffi form0 = compiled_forms[0] integral_offsets = form0.form_integral_offsets ex = module.lib.exterior_facet assert integral_offsets[ex + 1] - integral_offsets[ex] == 1 integral_id = form0.form_integral_ids[integral_offsets[ex]] assert integral_id == -1 default_integral = form0.form_integrals[integral_offsets[ex]] A = np.zeros((3, 3), dtype=dtype) w = np.array([], dtype=dtype) c = np.array([], dtype=dtype) facets = np.array([0], dtype=np.int32) perm = np.array([0], dtype=np.uint8) xdtype = dtype_to_scalar_dtype(dtype) coords = np.array( [0.0, 2.0, 0.0, np.sqrt(3.0), -1.0, 0.0, -np.sqrt(3.0), -1.0, 0.0], dtype=xdtype ) kernel = getattr(default_integral, f"tabulate_tensor_{dtype}") c_type, c_xtype = dtype_to_c_type(dtype), dtype_to_c_type(xdtype) for i in range(3): facets[0] = i kernel( ffi.cast(f"{c_type} *", A.ctypes.data), ffi.cast(f"{c_type} *", w.ctypes.data), ffi.cast(f"{c_type} *", c.ctypes.data), ffi.cast(f"{c_xtype} *", coords.ctypes.data), ffi.cast("int *", facets.ctypes.data), ffi.cast("uint8_t *", perm.ctypes.data), ) assert np.isclose(A.sum(), np.sqrt(12) * (i + 1)) @pytest.mark.parametrize( "dtype", [ "float32", "float64", pytest.param( "complex64", marks=pytest.mark.xfail( sys.platform.startswith("win32"), raises=NotImplementedError, reason="missing _Complex", ), ), pytest.param( "complex128", marks=pytest.mark.xfail( sys.platform.startswith("win32"), raises=NotImplementedError, reason="missing _Complex", ), ), ], ) def test_additive_cell_integral(dtype, compile_args): element = basix.ufl.element("Lagrange", "triangle", 1) domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2,))) space = ufl.FunctionSpace(domain, element) u, v = ufl.TrialFunction(space), ufl.TestFunction(space) a = ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx forms = [a] compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms( forms, options={"scalar_type": dtype}, cffi_extra_compile_args=compile_args ) for f, compiled_f in zip(forms, compiled_forms): assert compiled_f.rank == len(f.arguments()) ffi = module.ffi form0 = compiled_forms[0] cell = module.lib.cell offsets = form0.form_integral_offsets num_integrals = offsets[cell + 1] - offsets[cell] assert num_integrals == 1 integral_id = form0.form_integral_ids[offsets[cell]] assert integral_id == -1 default_integral = form0.form_integrals[offsets[cell]] A 
= np.zeros((3, 3), dtype=dtype) w = np.array([], dtype=dtype) c = np.array([], dtype=dtype) xdtype = dtype_to_scalar_dtype(dtype) coords = np.array( [0.0, 2.0, 0.0, np.sqrt(3.0), -1.0, 0.0, -np.sqrt(3.0), -1.0, 0.0], dtype=xdtype ) kernel = getattr(default_integral, f"tabulate_tensor_{dtype}") c_type, c_xtype = dtype_to_c_type(dtype), dtype_to_c_type(xdtype) kernel( ffi.cast(f"{c_type} *", A.ctypes.data), ffi.cast(f"{c_type} *", w.ctypes.data), ffi.cast(f"{c_type} *", c.ctypes.data), ffi.cast(f"{c_xtype} *", coords.ctypes.data), ffi.NULL, ffi.NULL, ) A0 = np.array(A) for i in range(3): kernel( ffi.cast(f"{c_type} *", A.ctypes.data), ffi.cast(f"{c_type} *", w.ctypes.data), ffi.cast(f"{c_type} *", c.ctypes.data), ffi.cast(f"{c_xtype} *", coords.ctypes.data), ffi.NULL, ffi.NULL, ) assert np.all(np.isclose(A, (i + 2) * A0)) ffcx-0.9.0/test/test_cache.py000066400000000000000000000022501470142666300161110ustar00rootroot00000000000000# Copyright (C) 2019 Chris Richardson # # This file is part of FFCx. (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later import sys import basix.ufl import ufl import ffcx.codegeneration.jit def test_cache_modes(compile_args): element = basix.ufl.element("Lagrange", "triangle", 1) domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2,))) space = ufl.FunctionSpace(domain, element) u, v = ufl.TrialFunction(space), ufl.TestFunction(space) a = ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx forms = [a] # Load form from /tmp compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms( forms, cffi_extra_compile_args=compile_args ) tmpname = module.__name__ tmpfile = module.__file__ print(tmpname, tmpfile) del sys.modules[tmpname] # Load form from cache compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms( forms, cache_dir="./compile-cache", cffi_extra_compile_args=compile_args ) newname = module.__name__ newfile = module.__file__ print(newname, newfile) assert newname == tmpname assert newfile != tmpfile ffcx-0.9.0/test/test_cmdline.py000066400000000000000000000011771470142666300164700ustar00rootroot00000000000000# Copyright (C) 2018 Chris N. Richardson # # This file is part of FFCx. (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later import os import os.path import subprocess import pytest def test_cmdline_simple(): os.chdir(os.path.dirname(__file__)) subprocess.run(["ffcx", "Poisson.py"]) def test_visualise(): try: import pygraphviz # noqa: F401 except ImportError: pytest.skip("pygraphviz not installed") os.chdir(os.path.dirname(__file__)) subprocess.run(["ffcx", "--visualise", "Poisson.py"]) assert os.path.isfile("S.pdf") assert os.path.isfile("F.pdf") ffcx-0.9.0/test/test_jit_expression.py000066400000000000000000000247541470142666300201300ustar00rootroot00000000000000# Copyright (C) 2019-2024 Michal Habera and Jørgen S. Dokken # # This file is part of FFCx.(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later import basix import basix.ufl import cffi import numpy as np import pytest import ufl import ffcx.codegeneration.jit def test_matvec(compile_args): """Test evaluation of linear rank-0 form. Evaluates expression c * A_ij * f_j where c is a Constant, A_ij is a user specified constant matrix and f_j is j-th component of user specified vector-valued finite element function (in P1 space). 
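    Since the evaluation points below are the cell vertices and f lives in
    P1, the values f_j(x_p) are just the nodal values in f_mat, so the
    kernel output must equal 0.5 * np.dot(a_mat, f_mat).T, which is the
    NumPy reference used in the assertion.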
""" e = basix.ufl.element("P", "triangle", 1, shape=(2,)) mesh = ufl.Mesh(e) V = ufl.FunctionSpace(mesh, e) f = ufl.Coefficient(V) a_mat = np.array([[1.0, 2.0], [1.0, 1.0]]) a = ufl.as_matrix(a_mat) expr = ufl.Constant(mesh) * ufl.dot(a, f) points = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]]) obj, module, code = ffcx.codegeneration.jit.compile_expressions( [(expr, points)], cffi_extra_compile_args=compile_args ) ffi = cffi.FFI() expression = obj[0] dtype = np.float64 c_type = "double" xdtype = np.float64 c_xtype = "double" A = np.zeros((3, 2), dtype=dtype) f_mat = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) # Coefficient storage XYXYXY w = np.array(f_mat.T.flatten(), dtype=dtype) c = np.array([0.5], dtype=dtype) entity_index = np.array([0], dtype=np.intc) quad_perm = np.array([0], dtype=np.dtype("uint8")) # Coords storage XYZXYZXYZ coords = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], dtype=xdtype) expression.tabulate_tensor_float64( ffi.cast(f"{c_type} *", A.ctypes.data), ffi.cast(f"{c_type} *", w.ctypes.data), ffi.cast(f"{c_type} *", c.ctypes.data), ffi.cast(f"{c_xtype} *", coords.ctypes.data), ffi.cast("int *", entity_index.ctypes.data), ffi.cast("uint8_t *", quad_perm.ctypes.data), ) # Check the computation against correct NumPy value assert np.allclose(A, 0.5 * np.dot(a_mat, f_mat).T) # Prepare NumPy array of points attached to the expression length = expression.num_points * expression.entity_dimension points_kernel = np.frombuffer( ffi.buffer(expression.points, length * ffi.sizeof("double")), np.double ) points_kernel = points_kernel.reshape(points.shape) assert np.allclose(points, points_kernel) # Check the value shape attached to the expression value_shape = np.frombuffer( ffi.buffer(expression.value_shape, expression.num_components * ffi.sizeof("int")), np.intc ) assert np.allclose(expr.ufl_shape, value_shape) def test_rank1(compile_args): """Tests evaluation of rank-1 form. Builds a linear operator which takes vector-valued functions in P1 space and evaluates expression [u_y, u_x] + grad(u_x) at specified points. 
""" e = basix.ufl.element("P", "triangle", 1, shape=(2,)) mesh = ufl.Mesh(e) V = ufl.FunctionSpace(mesh, e) u = ufl.TrialFunction(V) expr = ufl.as_vector([u[1], u[0]]) + ufl.grad(u[0]) points = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]]) obj, module, code = ffcx.codegeneration.jit.compile_expressions( [(expr, points)], cffi_extra_compile_args=compile_args ) ffi = cffi.FFI() expression = obj[0] dtype = np.float64 c_type = "double" xdtype = np.float64 c_xtype = "double" # 2 components for vector components of expression # 3 points of evaluation # 6 degrees of freedom for rank1 form A = np.zeros((3, 2, 6), dtype=dtype) # Coefficient storage XYXYXY w = np.array([0.0], dtype=dtype) c = np.array([0.0], dtype=dtype) entity_index = np.array([0], dtype=np.intc) quad_perm = np.array([0], dtype=np.dtype("uint8")) # Coords storage XYZXYZXYZ coords = np.zeros((points.shape[0], 3), dtype=xdtype) coords[:, :2] = points expression.tabulate_tensor_float64( ffi.cast(f"{c_type} *", A.ctypes.data), ffi.cast(f"{c_type} *", w.ctypes.data), ffi.cast(f"{c_type} *", c.ctypes.data), ffi.cast(f"{c_xtype} *", coords.ctypes.data), ffi.cast("int *", entity_index.ctypes.data), ffi.cast("uint8_t *", quad_perm.ctypes.data), ) f = np.array([[1.0, 2.0, 3.0], [-4.0, -5.0, 6.0]]) # Apply the operator on some test input data u_ffcx = np.einsum("ijk,k", A, f.T.flatten()) # Compute the correct values using NumPy # Gradf0 is gradient of f[0], each component of the gradient is constant gradf0 = np.array( [ [f[0, 1] - f[0, 0], f[0, 1] - f[0, 0], f[0, 1] - f[0, 0]], [f[0, 2] - f[0, 0], f[0, 2] - f[0, 0], f[0, 2] - f[0, 0]], ] ) u_correct = np.array([f[1], f[0]]) + gradf0 assert np.allclose(u_ffcx, u_correct.T) def test_elimiate_zero_tables_tensor(compile_args): """Test elimination of tensor-valued expressions with zero tables""" cell = "tetrahedron" c_el = basix.ufl.element("P", cell, 1, shape=(3,)) mesh = ufl.Mesh(c_el) e = basix.ufl.element("P", cell, 1) V = ufl.FunctionSpace(mesh, e) u = ufl.Coefficient(V) expr = ufl.sym(ufl.as_tensor([[u, u.dx(0).dx(0), 0], [u.dx(1), u.dx(1), 0], [0, 0, 0]])) # Get vertices of cell # Coords storage XYZXYZXYZ basix_c_e = basix.create_element( basix.ElementFamily.P, basix.CellType[cell], 1, discontinuous=False ) coords = basix_c_e.points # Using same basix element for coordinate element and coefficient coeff_points = basix_c_e.points # Compile expression at interpolation points of second order Lagrange space b_el = basix.create_element(basix.ElementFamily.P, basix.CellType[cell], 0, discontinuous=True) points = b_el.points obj, module, code = ffcx.codegeneration.jit.compile_expressions( [(expr, points)], cffi_extra_compile_args=compile_args ) ffi = cffi.FFI() expression = obj[0] dtype = np.float64 c_type = "double" c_xtype = "double" output = np.zeros(9 * points.shape[0], dtype=dtype) # Define coefficients for u = x + 2 * y u_coeffs = u_coeffs = coeff_points.T[0] + 2 * coeff_points.T[1] consts = np.array([], dtype=dtype) entity_index = np.array([0], dtype=np.intc) quad_perm = np.array([0], dtype=np.dtype("uint8")) expression.tabulate_tensor_float64( ffi.cast(f"{c_type} *", output.ctypes.data), ffi.cast(f"{c_type} *", u_coeffs.ctypes.data), ffi.cast(f"{c_type} *", consts.ctypes.data), ffi.cast(f"{c_xtype} *", coords.ctypes.data), ffi.cast("int *", entity_index.ctypes.data), ffi.cast("uint8_t *", quad_perm.ctypes.data), ) def exact_expr(x): val = np.zeros((9, x.shape[1]), dtype=dtype) val[0] = x[0] + 2 * x[1] val[1] = 0 + 0.5 * 2 val[3] = 0.5 * 2 + 0 val[4] = 2 return val.T exact = 
exact_expr(points.T) assert np.allclose(exact, output) def test_grad_constant(compile_args): """Test constant numbering. Test if numbering of constants are correct after UFL eliminates the constant inside the gradient. """ c_el = basix.ufl.element("Lagrange", "triangle", 1, shape=(2,)) mesh = ufl.Mesh(c_el) x = ufl.SpatialCoordinate(mesh) first_constant = ufl.Constant(mesh) second_constant = ufl.Constant(mesh) expr = second_constant * ufl.Dx(x[0] ** 2 + first_constant, 0) dtype = np.float64 points = np.array([[0.33, 0.25]], dtype=dtype) obj, _, _ = ffcx.codegeneration.jit.compile_expressions( [(expr, points)], cffi_extra_compile_args=compile_args ) ffi = cffi.FFI() expression = obj[0] c_type = "double" c_xtype = "double" output = np.zeros(1, dtype=dtype) # Define constants coords = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], dtype=dtype) u_coeffs = np.array([], dtype=dtype) consts = np.array([3, 7], dtype=dtype) entity_index = np.array([0], dtype=np.intc) quad_perm = np.array([0], dtype=np.dtype("uint8")) expression.tabulate_tensor_float64( ffi.cast(f"{c_type} *", output.ctypes.data), ffi.cast(f"{c_type} *", u_coeffs.ctypes.data), ffi.cast(f"{c_type} *", consts.ctypes.data), ffi.cast(f"{c_xtype} *", coords.ctypes.data), ffi.cast("int *", entity_index.ctypes.data), ffi.cast("uint8_t *", quad_perm.ctypes.data), ) assert output[0] == pytest.approx(consts[1] * 2 * points[0, 0]) def test_facet_expression(compile_args): """Test facet expression containing a facet normal on a manifold.""" c_el = basix.ufl.element("Lagrange", "triangle", 1, shape=(3,)) mesh = ufl.Mesh(c_el) n = ufl.FacetNormal(mesh) expr = n dtype = np.float64 points = np.array([[0.5]], dtype=dtype) obj, _, _ = ffcx.codegeneration.jit.compile_expressions( [(expr, points)], cffi_extra_compile_args=compile_args ) ffi = cffi.FFI() expression = obj[0] c_type = "double" c_xtype = "double" output = np.zeros(3, dtype=dtype) # Define constants coords = np.array([[0.3, 0.6, 0.1], [1.2, 0.4, 0.2], [1.3, 1.4, 0.3]], dtype=dtype) u_coeffs = np.array([], dtype=dtype) consts = np.array([], dtype=dtype) entity_index = np.array([0], dtype=np.intc) quad_perm = np.array([0], dtype=np.dtype("uint8")) tangents = np.array([coords[1] - coords[2], coords[2] - coords[0], coords[0] - coords[1]]) midpoints = np.array( [ coords[1] + (coords[2] - coords[1]) / 2, coords[0] + (coords[2] - coords[0]) / 2, coords[1] + (coords[1] - coords[0]) / 2, ] ) for i, (tangent, midpoint) in enumerate(zip(tangents, midpoints)): # normalize tangent tangent /= np.linalg.norm(tangent) # Tabulate facet normal output[:] = 0 entity_index[0] = i expression.tabulate_tensor_float64( ffi.cast(f"{c_type} *", output.ctypes.data), ffi.cast(f"{c_type} *", u_coeffs.ctypes.data), ffi.cast(f"{c_type} *", consts.ctypes.data), ffi.cast(f"{c_xtype} *", coords.ctypes.data), ffi.cast("int *", entity_index.ctypes.data), ffi.cast("uint8_t *", quad_perm.ctypes.data), ) # Assert that facet normal is perpendicular to tangent assert np.isclose(np.dot(output, tangent), 0) # Check that norm of facet normal is 1 assert np.isclose(np.linalg.norm(output), 1) # Check that facet normal is pointing out of the cell assert np.dot(midpoint - coords[i], output) > 0 ffcx-0.9.0/test/test_jit_forms.py000066400000000000000000001220651470142666300170510ustar00rootroot00000000000000# Copyright (C) 2018-2020 Garth N. Wells & Matthew Scroggs # # This file is part of FFCx. 
(https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later import sys import basix.ufl import numpy as np import pytest import sympy import ufl from sympy.abc import x, y, z import ffcx.codegeneration.jit from ffcx.codegeneration.utils import dtype_to_c_type, dtype_to_scalar_dtype @pytest.mark.parametrize( "dtype,expected_result", [ ( "float64", np.array([[1.0, -0.5, -0.5], [-0.5, 0.5, 0.0], [-0.5, 0.0, 0.5]], dtype=np.float64), ), pytest.param( "complex128", np.array( [ [1.0 + 0j, -0.5 + 0j, -0.5 + 0j], [-0.5 + 0j, 0.5 + 0j, 0.0 + 0j], [-0.5 + 0j, 0.0 + 0j, 0.5 + 0j], ], dtype=np.complex128, ), marks=pytest.mark.xfail( sys.platform.startswith("win32"), raises=NotImplementedError, reason="missing _Complex", ), ), ], ) def test_laplace_bilinear_form_2d(dtype, expected_result, compile_args): element = basix.ufl.element("Lagrange", "triangle", 1) domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2,))) space = ufl.FunctionSpace(domain, element) kappa = ufl.Constant(domain, shape=(2, 2)) u, v = ufl.TrialFunction(space), ufl.TestFunction(space) a = ufl.tr(kappa) * ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx forms = [a] compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms( forms, options={"scalar_type": dtype}, cffi_extra_compile_args=compile_args ) for f, compiled_f in zip(forms, compiled_forms): assert compiled_f.rank == len(f.arguments()) ffi = module.ffi form0 = compiled_forms[0] offsets = form0.form_integral_offsets cell = module.lib.cell assert offsets[cell + 1] - offsets[cell] == 1 integral_id = form0.form_integral_ids[offsets[cell]] assert integral_id == -1 default_integral = form0.form_integrals[offsets[cell]] A = np.zeros((3, 3), dtype=dtype) w = np.array([], dtype=dtype) kappa_value = np.array([[1.0, 2.0], [3.0, 4.0]]) c = np.array(kappa_value.flatten(), dtype=dtype) xdtype = dtype_to_scalar_dtype(dtype) coords = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], dtype=xdtype) c_type, c_xtype = dtype_to_c_type(dtype), dtype_to_c_type(xdtype) kernel = getattr(default_integral, f"tabulate_tensor_{dtype}") kernel( ffi.cast(f"{c_type} *", A.ctypes.data), ffi.cast(f"{c_type} *", w.ctypes.data), ffi.cast(f"{c_type} *", c.ctypes.data), ffi.cast(f"{c_xtype} *", coords.ctypes.data), ffi.NULL, ffi.NULL, ) assert np.allclose(A, np.trace(kappa_value) * expected_result) @pytest.mark.parametrize( "dtype,expected_result", [ ( np.float32, np.array( [ [1.0 / 12.0, 1.0 / 24.0, 1.0 / 24.0], [1.0 / 24.0, 1.0 / 12.0, 1.0 / 24.0], [1.0 / 24.0, 1.0 / 24.0, 1.0 / 12.0], ], dtype=np.float32, ), ), # ("longdouble", # np.array( # [[1.0 / 12.0, 1.0 / 24.0, 1.0 / 24.0], [1.0 / 24.0, 1.0 / 12.0, 1.0 / 24.0], # [1.0 / 24.0, 1.0 / 24.0, 1.0 / 12.0]], # dtype=np.longdouble)), ( np.float64, np.array( [ [1.0 / 12.0, 1.0 / 24.0, 1.0 / 24.0], [1.0 / 24.0, 1.0 / 12.0, 1.0 / 24.0], [1.0 / 24.0, 1.0 / 24.0, 1.0 / 12.0], ], dtype=np.float64, ), ), pytest.param( np.complex128, np.array( [ [1.0 / 12.0, 1.0 / 24.0, 1.0 / 24.0], [1.0 / 24.0, 1.0 / 12.0, 1.0 / 24.0], [1.0 / 24.0, 1.0 / 24.0, 1.0 / 12.0], ], dtype=np.complex128, ), marks=pytest.mark.xfail( sys.platform.startswith("win32"), raises=NotImplementedError, reason="missing _Complex", ), ), pytest.param( np.complex64, np.array( [ [1.0 / 12.0, 1.0 / 24.0, 1.0 / 24.0], [1.0 / 24.0, 1.0 / 12.0, 1.0 / 24.0], [1.0 / 24.0, 1.0 / 24.0, 1.0 / 12.0], ], dtype=np.complex64, ), marks=pytest.mark.xfail( sys.platform.startswith("win32"), raises=NotImplementedError, reason="missing _Complex", ), ), ], ) def 
test_mass_bilinear_form_2d(dtype, expected_result, compile_args): element = basix.ufl.element("Lagrange", "triangle", 1) domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2,))) space = ufl.FunctionSpace(domain, element) u, v = ufl.TrialFunction(space), ufl.TestFunction(space) a = ufl.inner(u, v) * ufl.dx L = ufl.conj(v) * ufl.dx forms = [a, L] compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms( forms, options={"scalar_type": dtype}, cffi_extra_compile_args=compile_args ) @pytest.mark.parametrize( "dtype,expected_result", [ ( "float64", np.array([[1.0, -0.5, -0.5], [-0.5, 0.5, 0.0], [-0.5, 0.0, 0.5]], dtype=np.float64) - (1.0 / 24.0) * np.array([[2, 1, 1], [1, 2, 1], [1, 1, 2]], dtype=np.float64), ), pytest.param( "complex128", np.array([[1.0, -0.5, -0.5], [-0.5, 0.5, 0.0], [-0.5, 0.0, 0.5]], dtype=np.complex128) - (1.0j / 24.0) * np.array([[2, 1, 1], [1, 2, 1], [1, 1, 2]], dtype=np.complex128), marks=pytest.mark.xfail( sys.platform.startswith("win32"), raises=NotImplementedError, reason="missing _Complex", ), ), ], ) def test_helmholtz_form_2d(dtype, expected_result, compile_args): element = basix.ufl.element("Lagrange", "triangle", 1) domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2,))) space = ufl.FunctionSpace(domain, element) u, v = ufl.TrialFunction(space), ufl.TestFunction(space) if np.issubdtype(dtype, np.complexfloating): k = ufl.constantvalue.ComplexValue(1j) elif np.issubdtype(dtype, np.floating): k = 1.0 else: raise RuntimeError( "Unknown mode type", ) a = (ufl.inner(ufl.grad(u), ufl.grad(v)) - ufl.inner(k * u, v)) * ufl.dx forms = [a] compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms( forms, options={"scalar_type": dtype}, cffi_extra_compile_args=compile_args ) for f, compiled_f in zip(forms, compiled_forms): assert compiled_f.rank == len(f.arguments()) form0 = compiled_forms[0].form_integrals[0] A = np.zeros((3, 3), dtype=dtype) w = np.array([], dtype=dtype) c = np.array([], dtype=dtype) ffi = module.ffi xdtype = dtype_to_scalar_dtype(dtype) coords = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], dtype=xdtype) c_type, c_xtype = dtype_to_c_type(dtype), dtype_to_c_type(xdtype) kernel = getattr(form0, f"tabulate_tensor_{dtype}") kernel( ffi.cast(f"{c_type} *", A.ctypes.data), ffi.cast(f"{c_type} *", w.ctypes.data), ffi.cast(f"{c_type} *", c.ctypes.data), ffi.cast(f"{c_xtype} *", coords.ctypes.data), ffi.NULL, ffi.NULL, ) np.testing.assert_allclose(A, expected_result) @pytest.mark.parametrize( "dtype,expected_result", [ ( "float64", np.array( [ [0.5, -1 / 6, -1 / 6, -1 / 6], [-1 / 6, 1 / 6, 0.0, 0.0], [-1 / 6, 0.0, 1 / 6, 0.0], [-1 / 6, 0.0, 0.0, 1 / 6], ], dtype=np.float64, ), ), pytest.param( "complex128", np.array( [ [0.5 + 0j, -1 / 6 + 0j, -1 / 6 + 0j, -1 / 6 + 0j], [-1 / 6 + 0j, 1 / 6 + 0j, 0.0 + 0j, 0.0 + 0j], [-1 / 6 + 0j, 0.0 + 0j, 1 / 6 + 0j, 0.0 + 0j], [-1 / 6 + 0j, 0.0 + 0j, 0.0 + 0j, 1 / 6 + 0j], ], dtype=np.complex128, ), marks=pytest.mark.xfail( sys.platform.startswith("win32"), raises=NotImplementedError, reason="missing _Complex", ), ), ], ) def test_laplace_bilinear_form_3d(dtype, expected_result, compile_args): element = basix.ufl.element("Lagrange", "tetrahedron", 1) domain = ufl.Mesh(basix.ufl.element("Lagrange", "tetrahedron", 1, shape=(3,))) space = ufl.FunctionSpace(domain, element) u, v = ufl.TrialFunction(space), ufl.TestFunction(space) a = ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx forms = [a] compiled_forms, module, code = 
ffcx.codegeneration.jit.compile_forms( forms, options={"scalar_type": dtype}, cffi_extra_compile_args=compile_args ) for f, compiled_f in zip(forms, compiled_forms): assert compiled_f.rank == len(f.arguments()) form0 = compiled_forms[0].form_integrals[0] A = np.zeros((4, 4), dtype=dtype) w = np.array([], dtype=dtype) c = np.array([], dtype=dtype) ffi = module.ffi xdtype = dtype_to_scalar_dtype(dtype) coords = np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0], dtype=xdtype) c_type, c_xtype = dtype_to_c_type(dtype), dtype_to_c_type(xdtype) kernel = getattr(form0, f"tabulate_tensor_{dtype}") kernel( ffi.cast(f"{c_type} *", A.ctypes.data), ffi.cast(f"{c_type} *", w.ctypes.data), ffi.cast(f"{c_type} *", c.ctypes.data), ffi.cast(f"{c_xtype} *", coords.ctypes.data), ffi.NULL, ffi.NULL, ) assert np.allclose(A, expected_result) def test_form_coefficient(compile_args): element = basix.ufl.element("Lagrange", "triangle", 1) domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2,))) space = ufl.FunctionSpace(domain, element) u, v = ufl.TestFunction(space), ufl.TrialFunction(space) g = ufl.Coefficient(space) a = g * ufl.inner(u, v) * ufl.dx forms = [a] compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms( forms, cffi_extra_compile_args=compile_args ) for f, compiled_f in zip(forms, compiled_forms): assert compiled_f.rank == len(f.arguments()) form0 = compiled_forms[0].form_integrals[0] A = np.zeros((3, 3), dtype=np.float64) w = np.array([1.0, 1.0, 1.0], dtype=np.float64) c = np.array([], dtype=np.float64) perm = np.array([0], dtype=np.uint8) ffi = module.ffi coords = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], dtype=np.float64) kernel = getattr(form0, "tabulate_tensor_float64") kernel( ffi.cast("double *", A.ctypes.data), ffi.cast("double *", w.ctypes.data), ffi.cast("double *", c.ctypes.data), ffi.cast("double *", coords.ctypes.data), ffi.NULL, ffi.cast("uint8_t *", perm.ctypes.data), ) A_analytic = np.array([[2, 1, 1], [1, 2, 1], [1, 1, 2]], dtype=np.float64) / 24.0 A_diff = A - A_analytic assert np.isclose(A_diff.max(), 0.0) assert np.isclose(A_diff.min(), 0.0) def test_subdomains(compile_args): element = basix.ufl.element("Lagrange", "triangle", 1) domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2,))) space = ufl.FunctionSpace(domain, element) u, v = ufl.TrialFunction(space), ufl.TestFunction(space) a0 = ufl.inner(u, v) * ufl.dx + ufl.inner(u, v) * ufl.dx(2) a1 = ufl.inner(u, v) * ufl.dx(2) + ufl.inner(u, v) * ufl.dx a2 = ufl.inner(u, v) * ufl.dx(2) + ufl.inner(u, v) * ufl.dx(1) a3 = ufl.inner(u, v) * ufl.ds(210) + ufl.inner(u, v) * ufl.ds(0) forms = [a0, a1, a2, a3] compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms( forms, options={"scalar_type": "float64"}, cffi_extra_compile_args=compile_args ) for f, compiled_f in zip(forms, compiled_forms): assert compiled_f.rank == len(f.arguments()) form0 = compiled_forms[0] offsets = form0.form_integral_offsets cell = module.lib.cell ids = [form0.form_integral_ids[j] for j in range(offsets[cell], offsets[cell + 1])] assert ids[0] == -1 and ids[1] == 2 form1 = compiled_forms[1] offsets = form1.form_integral_offsets ids = [form1.form_integral_ids[j] for j in range(offsets[cell], offsets[cell + 1])] assert ids[0] == -1 and ids[1] == 2 form2 = compiled_forms[2] offsets = form2.form_integral_offsets ids = [form2.form_integral_ids[j] for j in range(offsets[cell], offsets[cell + 1])] assert ids[0] == 1 and ids[1] == 2 form3 = compiled_forms[3] offsets = 
form3.form_integral_offsets assert offsets[cell + 1] - offsets[cell] == 0 exf = module.lib.exterior_facet ids = [form3.form_integral_ids[j] for j in range(offsets[exf], offsets[exf + 1])] assert ids[0] == 0 and ids[1] == 210 @pytest.mark.parametrize( "dtype", [ "float64", pytest.param( "complex128", marks=pytest.mark.xfail( sys.platform.startswith("win32"), raises=NotImplementedError, reason="missing _Complex", ), ), ], ) def test_interior_facet_integral(dtype, compile_args): element = basix.ufl.element("Lagrange", "triangle", 1) domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2,))) space = ufl.FunctionSpace(domain, element) u, v = ufl.TrialFunction(space), ufl.TestFunction(space) a0 = ufl.inner(ufl.jump(ufl.grad(u)), ufl.jump(ufl.grad(v))) * ufl.dS forms = [a0] compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms( forms, options={"scalar_type": dtype}, cffi_extra_compile_args=compile_args ) for f, compiled_f in zip(forms, compiled_forms): assert compiled_f.rank == len(f.arguments()) ffi = module.ffi form0 = compiled_forms[0] ffi = module.ffi integral0 = form0.form_integrals[0] A = np.zeros((6, 6), dtype=dtype) w = np.array([], dtype=dtype) c = np.array([], dtype=dtype) facets = np.array([0, 2], dtype=np.intc) perms = np.array([0, 1], dtype=np.uint8) xdtype = dtype_to_scalar_dtype(dtype) coords = np.array( [ [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0], [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0], ], dtype=xdtype, ) c_type = dtype_to_c_type(dtype) c_xtype = dtype_to_c_type(xdtype) kernel = getattr(integral0, f"tabulate_tensor_{dtype}") kernel( ffi.cast(f"{c_type} *", A.ctypes.data), ffi.cast(f"{c_type} *", w.ctypes.data), ffi.cast(f"{c_type} *", c.ctypes.data), ffi.cast(f"{c_xtype} *", coords.ctypes.data), ffi.cast("int *", facets.ctypes.data), ffi.cast("uint8_t *", perms.ctypes.data), ) @pytest.mark.parametrize( "dtype", [ "float64", pytest.param( "complex128", marks=pytest.mark.xfail( sys.platform.startswith("win32"), raises=NotImplementedError, reason="missing _Complex", ), ), ], ) def test_conditional(dtype, compile_args): element = basix.ufl.element("Lagrange", "triangle", 1) domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2,))) space = ufl.FunctionSpace(domain, element) u, v = ufl.TrialFunction(space), ufl.TestFunction(space) x = ufl.SpatialCoordinate(domain) condition = ufl.Or(ufl.ge(ufl.real(x[0] + x[1]), 0.1), ufl.ge(ufl.real(x[1] + x[1] ** 2), 0.1)) c1 = ufl.conditional(condition, 2.0, 1.0) a = c1 * ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx x1x2 = ufl.real(x[0] + ufl.as_ufl(2) * x[1]) c2 = ufl.conditional(ufl.ge(x1x2, 0), 6.0, 0.0) b = c2 * ufl.conj(v) * ufl.dx forms = [a, b] compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms( forms, options={"scalar_type": dtype}, cffi_extra_compile_args=compile_args ) form0 = compiled_forms[0].form_integrals[0] form1 = compiled_forms[1].form_integrals[0] ffi = module.ffi A1 = np.zeros((3, 3), dtype=dtype) w1 = np.array([1.0, 1.0, 1.0], dtype=dtype) c = np.array([], dtype=dtype) xdtype = dtype_to_scalar_dtype(dtype) coords = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], dtype=xdtype) c_type, c_xtype = dtype_to_c_type(dtype), dtype_to_c_type(xdtype) kernel0 = ffi.cast( f"ufcx_tabulate_tensor_{dtype} *", getattr(form0, f"tabulate_tensor_{dtype}") ) kernel0( ffi.cast(f"{c_type} *", A1.ctypes.data), ffi.cast(f"{c_type} *", w1.ctypes.data), ffi.cast(f"{c_type} *", c.ctypes.data), ffi.cast(f"{c_xtype} *", coords.ctypes.data), ffi.NULL, ffi.NULL, ) 
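    # The quadrature points of the rule used here all satisfy
    # x[0] + x[1] >= 0.1 on this cell, so c1 evaluates to 2.0 everywhere
    # and the expected tensor is twice the P1 stiffness matrix of the
    # reference triangle. Likewise x[0] + 2*x[1] >= 0 holds everywhere,
    # so c2 is 6.0 and each entry of the load vector below is
    # 6 * integral(phi_i) = 6 * (1/6) = 1.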
expected_result = np.array([[2, -1, -1], [-1, 1, 0], [-1, 0, 1]], dtype=dtype) assert np.allclose(A1, expected_result) A2 = np.zeros(3, dtype=dtype) w2 = np.array([1.0, 1.0, 1.0], dtype=dtype) kernel1 = ffi.cast( f"ufcx_tabulate_tensor_{dtype} *", getattr(form1, f"tabulate_tensor_{dtype}") ) kernel1( ffi.cast(f"{c_type} *", A2.ctypes.data), ffi.cast(f"{c_type} *", w2.ctypes.data), ffi.cast(f"{c_type} *", c.ctypes.data), ffi.cast(f"{c_xtype} *", coords.ctypes.data), ffi.NULL, ffi.NULL, ) expected_result = np.ones(3, dtype=dtype) assert np.allclose(A2, expected_result) def test_custom_quadrature(compile_args): ve = basix.ufl.element("P", "triangle", 1, shape=(2,)) mesh = ufl.Mesh(ve) e = basix.ufl.element("P", mesh.ufl_cell().cellname(), 2) V = ufl.FunctionSpace(mesh, e) u, v = ufl.TrialFunction(V), ufl.TestFunction(V) points = [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.5, 0.5], [0.0, 0.5], [0.5, 0.0]] weights = [1 / 12] * 6 a = ( u * v * ufl.dx( metadata={ "quadrature_rule": "custom", "quadrature_points": points, "quadrature_weights": weights, } ) ) forms = [a] compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms( forms, cffi_extra_compile_args=compile_args ) ffi = module.ffi form = compiled_forms[0] default_integral = form.form_integrals[0] A = np.zeros((6, 6), dtype=np.float64) w = np.array([], dtype=np.float64) c = np.array([], dtype=np.float64) coords = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], dtype=np.float64) kernel = getattr(default_integral, "tabulate_tensor_float64") kernel( ffi.cast("double *", A.ctypes.data), ffi.cast("double *", w.ctypes.data), ffi.cast("double *", c.ctypes.data), ffi.cast("double *", coords.ctypes.data), ffi.NULL, ffi.NULL, ) # Check that A is diagonal assert np.count_nonzero(A - np.diag(np.diagonal(A))) == 0 def test_curl_curl(compile_args): V = basix.ufl.element("N1curl", "triangle", 2) domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2,))) space = ufl.FunctionSpace(domain, V) u, v = ufl.TrialFunction(space), ufl.TestFunction(space) a = ufl.inner(ufl.curl(u), ufl.curl(v)) * ufl.dx forms = [a] compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms( forms, cffi_extra_compile_args=compile_args ) def lagrange_triangle_symbolic(order, corners=((1, 0), (2, 0), (0, 1)), fun=lambda i: i): from sympy import S poly_basis = [x**i * y**j for i in range(order + 1) for j in range(order + 1 - i)] # vertices eval_points = [S(c) for c in corners] # edges for e in [(1, 2), (0, 2), (0, 1)]: p0 = corners[e[0]] p1 = corners[e[1]] if order > 3: raise NotImplementedError elif order == 3: eval_points += [ tuple(S(a) + (b - a) * i for a, b in zip(p0, p1)) for i in [(1 - 1 / sympy.sqrt(5)) / 2, (1 + 1 / sympy.sqrt(5)) / 2] ] else: eval_points += [ tuple(S(a) + sympy.Rational((b - a) * i, order) for a, b in zip(p0, p1)) for i in range(1, order) ] # face for f in [(0, 1, 2)]: p0 = corners[f[0]] p1 = corners[f[1]] p2 = corners[f[2]] eval_points += [ tuple( S(a) + sympy.Rational((b - a) * i, order) + sympy.Rational((c - a) * j, order) for a, b, c in zip(p0, p1, p2) ) for i in range(1, order) for j in range(1, order - i) ] dual_mat = [[f.subs(x, p[0]).subs(y, p[1]) for p in eval_points] for f in poly_basis] dual_mat = sympy.Matrix(dual_mat) mat = dual_mat.inv() functions = [sum(i * j for i, j in zip(mat.row(k), poly_basis)) for k in range(mat.rows)] results = [] for f in functions: integrand = fun(f) results.append(integrand.integrate((x, 1 - y, 2 - 2 * y), (y, 0, 1))) return results @pytest.mark.parametrize("dtype", 
["float64"]) @pytest.mark.parametrize( "sym_fun,ufl_fun", [ (lambda i: i, lambda i: i), (lambda i: i.diff(x), lambda i: ufl.grad(i)[0]), (lambda i: i.diff(y), lambda i: ufl.grad(i)[1]), ], ) @pytest.mark.parametrize("order", [1, 2, 3]) def test_lagrange_triangle(compile_args, order, dtype, sym_fun, ufl_fun): sym = lagrange_triangle_symbolic(order, fun=sym_fun) element = basix.ufl.element("Lagrange", "triangle", order) domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2,))) space = ufl.FunctionSpace(domain, element) v = ufl.TestFunction(space) a = ufl_fun(v) * ufl.dx forms = [a] compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms( forms, options={"scalar_type": dtype}, cffi_extra_compile_args=compile_args ) ffi = module.ffi form0 = compiled_forms[0] assert form0.form_integral_offsets[module.lib.cell + 1] == 1 default_integral = form0.form_integrals[0] b = np.zeros((order + 2) * (order + 1) // 2, dtype=dtype) w = np.array([], dtype=dtype) xdtype = dtype_to_scalar_dtype(dtype) coords = np.array([[1.0, 0.0, 0.0], [2.0, 0.0, 0.0], [0.0, 1.0, 0.0]], dtype=xdtype) c_type, c_xtype = dtype_to_c_type(dtype), dtype_to_c_type(xdtype) kernel = getattr(default_integral, f"tabulate_tensor_{dtype}") kernel( ffi.cast(f"{c_type} *", b.ctypes.data), ffi.cast(f"{c_type} *", w.ctypes.data), ffi.NULL, ffi.cast(f"{c_xtype} *", coords.ctypes.data), ffi.NULL, ffi.NULL, ) # Check that the result is the same as for sympy assert np.allclose(b, [float(i) for i in sym]) def lagrange_tetrahedron_symbolic( order, corners=((1, 0, 0), (2, 0, 0), (0, 1, 0), (0, 0, 1)), fun=lambda i: i ): from sympy import S poly_basis = [ x**i * y**j * z**k for i in range(order + 1) for j in range(order + 1 - i) for k in range(order + 1 - i - j) ] # vertices eval_points = [S(c) for c in corners] # edges for e in [(2, 3), (1, 3), (1, 2), (0, 3), (0, 2), (0, 1)]: p0 = corners[e[0]] p1 = corners[e[1]] if order > 3: raise NotImplementedError elif order == 3: eval_points += [ tuple(S(a) + (b - a) * i for a, b in zip(p0, p1)) for i in [(1 - 1 / sympy.sqrt(5)) / 2, (1 + 1 / sympy.sqrt(5)) / 2] ] else: eval_points += [ tuple(S(a) + sympy.Rational((b - a) * i, order) for a, b in zip(p0, p1)) for i in range(1, order) ] # face for f in [(1, 2, 3), (0, 2, 3), (0, 1, 3), (0, 1, 2)]: p0 = corners[f[0]] p1 = corners[f[1]] p2 = corners[f[2]] eval_points += [ tuple( S(a) + sympy.Rational((b - a) * i, order) + sympy.Rational((c - a) * j, order) for a, b, c in zip(p0, p1, p2) ) for i in range(1, order) for j in range(1, order - i) ] # interior for v in [(0, 1, 2, 3)]: p0 = corners[v[0]] p1 = corners[v[1]] p2 = corners[v[2]] p3 = corners[v[3]] eval_points += [ tuple( S(a) + sympy.Rational((b - a) * i, order) + sympy.Rational((c - a) * j, order) + sympy.Rational((d - a) * k, order) for a, b, c, d in zip(p0, p1, p2, p3) ) for i in range(1, order) for j in range(1, order - i) for k in range(1, order - i - j) ] dual_mat = [ [f.subs(x, p[0]).subs(y, p[1]).subs(z, p[2]) for p in eval_points] for f in poly_basis ] dual_mat = sympy.Matrix(dual_mat) mat = dual_mat.inv() functions = [sum(i * j for i, j in zip(mat.row(k), poly_basis)) for k in range(mat.rows)] results = [] for f in functions: integrand = fun(f) results.append( integrand.integrate((x, 1 - y - z, 2 - 2 * y - 2 * z), (y, 0, 1 - z), (z, 0, 1)) ) return results @pytest.mark.parametrize("dtype", ["float64"]) @pytest.mark.parametrize( "sym_fun,ufl_fun", [ (lambda i: i, lambda i: i), (lambda i: i.diff(x), lambda i: ufl.grad(i)[0]), (lambda i: i.diff(y), lambda i: 
ufl.grad(i)[1]), ], ) @pytest.mark.parametrize("order", [1, 2, 3]) def test_lagrange_tetrahedron(compile_args, order, dtype, sym_fun, ufl_fun): sym = lagrange_tetrahedron_symbolic(order, fun=sym_fun) element = basix.ufl.element("Lagrange", "tetrahedron", order) domain = ufl.Mesh(basix.ufl.element("Lagrange", "tetrahedron", 1, shape=(3,))) space = ufl.FunctionSpace(domain, element) v = ufl.TestFunction(space) a = ufl_fun(v) * ufl.dx forms = [a] compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms( forms, options={"scalar_type": dtype}, cffi_extra_compile_args=compile_args ) ffi = module.ffi form0 = compiled_forms[0] assert form0.form_integral_offsets[module.lib.cell + 1] == 1 default_integral = form0.form_integrals[0] b = np.zeros((order + 3) * (order + 2) * (order + 1) // 6, dtype=dtype) w = np.array([], dtype=dtype) xdtype = dtype_to_scalar_dtype(dtype) coords = np.array([1.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0], dtype=xdtype) c_type, c_xtype = dtype_to_c_type(dtype), dtype_to_c_type(xdtype) kernel = getattr(default_integral, f"tabulate_tensor_{dtype}") kernel( ffi.cast(f"{c_type} *", b.ctypes.data), ffi.cast(f"{c_type} *", w.ctypes.data), ffi.NULL, ffi.cast(f"{c_xtype} *", coords.ctypes.data), ffi.NULL, ffi.NULL, ) # Check that the result is the same as for sympy assert np.allclose(b, [float(i) for i in sym]) def test_prism(compile_args): element = basix.ufl.element("Lagrange", "prism", 1) domain = ufl.Mesh(basix.ufl.element("Lagrange", "prism", 1, shape=(3,))) space = ufl.FunctionSpace(domain, element) v = ufl.TestFunction(space) L = v * ufl.dx forms = [L] compiled_forms, module, _ = ffcx.codegeneration.jit.compile_forms( forms, options={"scalar_type": "float64"}, cffi_extra_compile_args=compile_args ) ffi = module.ffi form0 = compiled_forms[0] assert form0.form_integral_offsets[module.lib.cell + 1] == 1 default_integral = form0.form_integrals[0] b = np.zeros(6, dtype=np.float64) coords = np.array( [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0], dtype=np.float64, ) kernel = getattr(default_integral, "tabulate_tensor_float64") kernel( ffi.cast("double *", b.ctypes.data), ffi.NULL, ffi.NULL, ffi.cast("double *", coords.ctypes.data), ffi.NULL, ffi.NULL, ) assert np.isclose(sum(b), 0.5) @pytest.mark.xfail( sys.platform.startswith("win32"), raises=NotImplementedError, reason="missing _Complex" ) def test_complex_operations(compile_args): dtype = "complex128" cell = "triangle" c_element = basix.ufl.element("Lagrange", cell, 1, shape=(2,)) mesh = ufl.Mesh(c_element) element = basix.ufl.element("DG", cell, 0, shape=(2,)) V = ufl.FunctionSpace(mesh, element) u = ufl.Coefficient(V) J1 = ufl.real(u)[0] * ufl.imag(u)[1] * ufl.conj(u)[0] * ufl.dx J2 = ufl.real(u[0]) * ufl.imag(u[1]) * ufl.conj(u[0]) * ufl.dx forms = [J1, J2] compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms( forms, options={"scalar_type": dtype}, cffi_extra_compile_args=compile_args ) form0 = compiled_forms[0].form_integrals[0] form1 = compiled_forms[1].form_integrals[0] ffi = module.ffi w1 = np.array([3 + 5j, 8 - 7j], dtype=dtype) c = np.array([], dtype=dtype) xdtype = dtype_to_scalar_dtype(dtype) coords = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], dtype=xdtype) J_1 = np.zeros((1), dtype=dtype) c_type, c_xtype = dtype_to_c_type(dtype), dtype_to_c_type(xdtype) kernel0 = ffi.cast( f"ufcx_tabulate_tensor_{dtype} *", getattr(form0, f"tabulate_tensor_{dtype}") ) kernel0( ffi.cast(f"{c_type} *", J_1.ctypes.data), 
ffi.cast(f"{c_type} *", w1.ctypes.data), ffi.cast(f"{c_type} *", c.ctypes.data), ffi.cast(f"{c_xtype} *", coords.ctypes.data), ffi.NULL, ffi.NULL, ) expected_result = np.array( [0.5 * np.real(w1[0]) * np.imag(w1[1]) * (np.real(w1[0]) - 1j * np.imag(w1[0]))], dtype=dtype, ) assert np.allclose(J_1, expected_result) J_2 = np.zeros((1), dtype=dtype) kernel1 = ffi.cast( f"ufcx_tabulate_tensor_{dtype} *", getattr(form1, f"tabulate_tensor_{dtype}") ) kernel1( ffi.cast(f"{c_type} *", J_2.ctypes.data), ffi.cast(f"{c_type} *", w1.ctypes.data), ffi.cast(f"{c_type} *", c.ctypes.data), ffi.cast(f"{c_xtype} *", coords.ctypes.data), ffi.NULL, ffi.NULL, ) assert np.allclose(J_2, expected_result) assert np.allclose(J_1, J_2) def test_invalid_function_name(compile_args): # Monkey patch to force invalid name old_str = ufl.Coefficient.__str__ ufl.Coefficient.__str__ = lambda self: "invalid function name" V = basix.ufl.element("Lagrange", "triangle", 1) domain = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2,))) space = ufl.FunctionSpace(domain, V) u = ufl.Coefficient(space) a = ufl.inner(u, u) * ufl.dx forms = [a] try: compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms( forms, cffi_extra_compile_args=compile_args ) except ValueError: pass except Exception: raise RuntimeError("Compilation should fail with ValueError.") # Revert monkey patch for other tests ufl.Coefficient.__str__ = old_str def test_interval_vertex_quadrature(compile_args): c_el = basix.ufl.element("Lagrange", "interval", 1, shape=(1,)) mesh = ufl.Mesh(c_el) x = ufl.SpatialCoordinate(mesh) dx = ufl.Measure("dx", metadata={"quadrature_rule": "vertex"}) b = x[0] * dx forms = [b] compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms( forms, cffi_extra_compile_args=compile_args ) ffi = module.ffi form0 = compiled_forms[0] assert form0.form_integral_offsets[module.lib.cell + 1] == 1 default_integral = form0.form_integrals[0] J = np.zeros(1, dtype=np.float64) a = np.pi b = np.exp(1) coords = np.array([a, 0.0, 0.0, b, 0.0, 0.0], dtype=np.float64) kernel = getattr(default_integral, "tabulate_tensor_float64") kernel( ffi.cast("double *", J.ctypes.data), ffi.NULL, ffi.NULL, ffi.cast("double *", coords.ctypes.data), ffi.NULL, ffi.NULL, ) assert np.isclose(J[0], (0.5 * a + 0.5 * b) * np.abs(b - a)) def test_facet_vertex_quadrature(compile_args): """Test facet vertex quadrature""" c_el = basix.ufl.element("Lagrange", "quadrilateral", 1, shape=(2,)) mesh = ufl.Mesh(c_el) x = ufl.SpatialCoordinate(mesh) ds = ufl.Measure("ds", metadata={"quadrature_rule": "vertex"}) expr = x[0] + ufl.cos(x[1]) b1 = expr * ds ds_c = ufl.Measure( "ds", metadata={ "quadrature_rule": "custom", "quadrature_points": np.array([[0.0], [1.0]]), "quadrature_weights": np.array([1.0 / 2.0, 1.0 / 2.0]), }, ) b2 = expr * ds_c forms = [b1, b2] compiled_forms, module, _ = ffcx.codegeneration.jit.compile_forms( forms, cffi_extra_compile_args=compile_args ) ffi = module.ffi assert len(compiled_forms) == 2 solutions = [] for form in compiled_forms: offsets = form.form_integral_offsets exf = module.lib.exterior_facet assert offsets[exf + 1] - offsets[exf] == 1 default_integral = form.form_integrals[offsets[exf]] J = np.zeros(1, dtype=np.float64) a = np.pi b = np.exp(1) coords = np.array( [a, 0.1, 0.0, a + b, 0.0, 0.0, a, a, 0.0, a + 2 * b, a, 0.0], dtype=np.float64 ) # First facet is between vertex 0 and 1 in coords facets = np.array([0], dtype=np.intc) kernel = getattr(default_integral, "tabulate_tensor_float64") kernel( ffi.cast("double *", 
J.ctypes.data), ffi.NULL, ffi.NULL, ffi.cast("double *", coords.ctypes.data), ffi.cast("int *", facets.ctypes.data), ffi.NULL, ) solutions.append(J[0]) # Test against exact result assert np.isclose( J[0], (0.5 * (a + np.cos(0.1)) + 0.5 * (a + b + np.cos(0))) * np.sqrt(b**2 + 0.1**2) ) # Compare custom quadrature with vertex quadrature assert np.isclose(solutions[0], solutions[1]) def test_manifold_derivatives(compile_args): """Test higher order derivatives on manifolds""" c_el = basix.ufl.element("Lagrange", "interval", 1, shape=(2,)) mesh = ufl.Mesh(c_el) x = ufl.SpatialCoordinate(mesh) dx = ufl.Measure("dx", domain=mesh) order = 4 el = basix.ufl.element("Lagrange", "interval", order) V = ufl.FunctionSpace(mesh, el) u = ufl.Coefficient(V) d = 5.3 f_ex = d * order * (order - 1) * x[1] ** (order - 2) expr = u.dx(1).dx(1) - f_ex J = expr * expr * dx compiled_forms, module, _ = ffcx.codegeneration.jit.compile_forms( [J], cffi_extra_compile_args=compile_args ) default_integral = compiled_forms[0].form_integrals[0] scale = 2.5 coords = np.array([0.0, 0.0, 0.0, 0.0, scale, 0.0], dtype=np.float64) dof_coords = scale * el._element.points.reshape(-1) w = np.array([d * d_c**order for d_c in dof_coords], dtype=np.float64) c = np.array([], dtype=np.float64) perm = np.array([0], dtype=np.uint8) ffi = module.ffi J = np.zeros(1, dtype=np.float64) kernel = getattr(default_integral, "tabulate_tensor_float64") kernel( ffi.cast("double *", J.ctypes.data), ffi.cast("double *", w.ctypes.data), ffi.cast("double *", c.ctypes.data), ffi.cast("double *", coords.ctypes.data), ffi.NULL, ffi.cast("uint8_t *", perm.ctypes.data), ) assert np.isclose(J[0], 0.0) def test_integral_grouping(compile_args): """We group integrals with common integrands to avoid duplicated integration kernels. This means that `inner(u, v)*dx((1,2,3)) + inner(grad(u), grad(v))*dx(2) + inner(u,v)*dx` is grouped as 1. `inner(u,v)*dx(("everywhere", 1, 3))` 2. `(inner(grad(u), grad(v)) + inner(u, v))*dx(2)` Each of the forms has one generated `tabulate_tensor_*` function, which is referred to multiple times in `integrals_` and `integral_ids_` """ mesh = ufl.Mesh(basix.ufl.element("Lagrange", "triangle", 1, shape=(2,))) V = ufl.FunctionSpace(mesh, basix.ufl.element("Lagrange", "triangle", 1)) u = ufl.TrialFunction(V) v = ufl.TestFunction(V) a = ( ufl.inner(u, v) * ufl.dx((1, 2, 3)) + ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx(2) + ufl.inner(u, v) * ufl.dx ) compiled_forms, module, _ = ffcx.codegeneration.jit.compile_forms( [a], cffi_extra_compile_args=compile_args ) # NOTE: This assumes that the first integral type is cell integrals, see UFCx.h cell = module.lib.cell num_integrals = ( compiled_forms[0].form_integral_offsets[cell + 1] - compiled_forms[0].form_integral_offsets[cell] ) assert num_integrals == 4 unique_integrals = set( [ compiled_forms[0].form_integrals[compiled_forms[0].form_integral_offsets[cell] + i] for i in range(num_integrals) ] ) assert len(unique_integrals) == 2 @pytest.mark.parametrize("dtype", ["float64"]) @pytest.mark.parametrize("permutation", [[0], [1]]) def test_mixed_dim_form(compile_args, dtype, permutation): """Test that the local element tensor corresponding to a mixed-dimensional form is correct. The form involves an integral over a facet of the cell. The trial function and a coefficient f are of codim 0. The test function and a coefficient g are of codim 1. We compare against another form where the test function and g are codim 0 but have the same trace on the facet. 
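    Concretely, the tensors compared below are

        A     : shape (2, 6), P1 test space on the interval
        A_ref : shape (3, 6), P1 test space on the triangle, with the row
                of the off-facet vertex dof dropped

    and, for permutation [1], the trial columns exchanged by the reflected
    edge are swapped in A_ref before the entry-by-entry comparison.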
""" def tabulate_tensor(ele_type, V_cell_type, W_cell_type, coeffs): "Helper function to create a form and compute the local element tensor" V_ele = basix.ufl.element(ele_type, V_cell_type, 2) W_ele = basix.ufl.element(ele_type, W_cell_type, 1) gdim = 2 V_domain = ufl.Mesh(basix.ufl.element("Lagrange", V_cell_type, 1, shape=(gdim,))) W_domain = ufl.Mesh(basix.ufl.element("Lagrange", W_cell_type, 1, shape=(gdim,))) V = ufl.FunctionSpace(V_domain, V_ele) W = ufl.FunctionSpace(W_domain, W_ele) u = ufl.TrialFunction(V) q = ufl.TestFunction(W) f = ufl.Coefficient(V) g = ufl.Coefficient(W) ds = ufl.Measure("ds", domain=V_domain) n = ufl.FacetNormal(V_domain) forms = [ufl.inner(f * g * ufl.grad(u), n * q) * ds] compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms( forms, options={"scalar_type": dtype}, cffi_extra_compile_args=compile_args ) form0 = compiled_forms[0] default_integral = form0.form_integrals[0] kernel = getattr(default_integral, f"tabulate_tensor_{dtype}") A = np.zeros((W_ele.dim, V_ele.dim), dtype=dtype) w = np.array(coeffs, dtype=dtype) c = np.array([], dtype=dtype) facet = np.array([0], dtype=np.intc) perm = np.array(permutation, dtype=np.uint8) xdtype = dtype_to_scalar_dtype(dtype) coords = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], dtype=xdtype) c_type = dtype_to_c_type(dtype) c_xtype = dtype_to_c_type(xdtype) ffi = module.ffi kernel( ffi.cast(f"{c_type} *", A.ctypes.data), ffi.cast(f"{c_type} *", w.ctypes.data), ffi.cast(f"{c_type} *", c.ctypes.data), ffi.cast(f"{c_xtype} *", coords.ctypes.data), ffi.cast("int *", facet.ctypes.data), ffi.cast("uint8_t *", perm.ctypes.data), ) return A # Define the element type ele_type = "Lagrange" # Define the cell type for each space V_cell_type = "triangle" Vbar_cell_type = "interval" # Coefficient data # f is a quadratic on each edge that is 0 at the vertices and 1 at the midpoint f_data = [0, 0, 0, 1, 1, 1] # g is a linear function along the edge that is 0 at one vertex and 1 at the other g_data = [0, 1] # Collect coefficient data coeffs = f_data + g_data # Tabulate the tensor for the mixed-dimensional form A = tabulate_tensor(ele_type, V_cell_type, Vbar_cell_type, coeffs) # Compare to a reference result. Here, we compare against the same kernel but with # the interval element replaced with a triangle. # We create some data for g on the triangle whose trace coincides with g on the interval g_data = [0, 0, 1] coeffs_ref = f_data + g_data A_ref = tabulate_tensor(ele_type, V_cell_type, V_cell_type, coeffs_ref) # Remove the entries for the extra test DOF on the triangle element A_ref = A_ref[1:][:] # If the permutation is 1, this means the triangle sees its edge as being flipped # relative to the edge's global orientation. Thus the result is the same as swapping # cols 1 and 2 and cols 4 and 5 of the reference result. 
    if permutation[0] == 1:
        A_ref[:, [1, 2]] = A_ref[:, [2, 1]]
        A_ref[:, [4, 5]] = A_ref[:, [5, 4]]

    assert np.allclose(A, A_ref)
ffcx-0.9.0/test/test_lnodes.py000066400000000000000000000065421470142666300163420ustar00rootroot00000000000000import importlib

import numpy as np
import pytest
from cffi import FFI

from ffcx.codegeneration import lnodes as L
from ffcx.codegeneration.C.c_implementation import CFormatter
from ffcx.codegeneration.utils import dtype_to_c_type


@pytest.mark.parametrize("dtype", ("float32", "float64", "intc"))
def test_gemm(dtype):
    # Test LNodes simple matrix-matrix multiply in C
    p, q, r = 5, 16, 12
    A = L.Symbol("A", dtype=L.DataType.SCALAR)
    B = L.Symbol("B", dtype=L.DataType.SCALAR)
    C = L.Symbol("C", dtype=L.DataType.SCALAR)
    code = [L.Comment(f"Matrix multiply A{p, r} = B{p, q} * C{q, r}")]

    i = L.Symbol("i", dtype=L.DataType.INT)
    j = L.Symbol("j", dtype=L.DataType.INT)
    k = L.Symbol("k", dtype=L.DataType.INT)

    m_ij = L.MultiIndex([i, j], [p, q])
    m_ik = L.MultiIndex([i, k], [p, r])
    m_jk = L.MultiIndex([j, k], [q, r])

    # Build the loop nest from the inside out: i is the innermost loop,
    # k the outermost
    body = [L.AssignAdd(A[m_ik], B[m_ij] * C[m_jk])]
    body = [L.ForRange(i, 0, p, body=body)]
    body = [L.ForRange(j, 0, q, body=body)]
    code += [L.ForRange(k, 0, r, body=body)]

    # Format into C and compile with CFFI
    Q = CFormatter(dtype=dtype)
    c_scalar = dtype_to_c_type(dtype)
    decl = f"void gemm({c_scalar} *A, {c_scalar} *B, {c_scalar} *C)"
    c_code = decl + "{\n" + Q.c_format(L.StatementList(code)) + "\n}\n"

    ffibuilder = FFI()
    ffibuilder.cdef(decl + ";")
    ffibuilder.set_source(f"_gemm_{c_scalar}", c_code)
    ffibuilder.compile(verbose=True)

    _gemm = importlib.import_module(f"_gemm_{c_scalar}")
    gemm = _gemm.lib.gemm
    ffi = _gemm.ffi

    A = np.zeros((p, r), dtype=dtype)
    B = np.ones((p, q), dtype=dtype)
    C = np.ones((q, r), dtype=dtype)
    pA = ffi.cast(f"{c_scalar} *", A.ctypes.data)
    pB = ffi.cast(f"{c_scalar} *", B.ctypes.data)
    pC = ffi.cast(f"{c_scalar} *", C.ctypes.data)
    gemm(pA, pB, pC)

    # Each entry of A is the sum of q products of ones
    assert np.all(A == q)


@pytest.mark.parametrize("dtype", ("float32", "float64", "intc"))
def test_gemv(dtype):
    # Test LNodes simple matvec multiply in C
    p, q = 5, 16
    y = L.Symbol("y", dtype=L.DataType.SCALAR)
    A = L.Symbol("A", dtype=L.DataType.SCALAR)
    x = L.Symbol("x", dtype=L.DataType.SCALAR)
    code = [L.Comment(f"Matrix-vector multiply y({p}) = A{p, q} * x({q})")]

    i = L.Symbol("i", dtype=L.DataType.INT)
    j = L.Symbol("j", dtype=L.DataType.INT)
    m_ij = L.MultiIndex([i, j], [p, q])

    body = [L.AssignAdd(y[i], A[m_ij] * x[j])]
    body = [L.ForRange(i, 0, p, body=body)]
    code += [L.ForRange(j, 0, q, body=body)]

    # Format into C and compile with CFFI
    Q = CFormatter(dtype=dtype)
    c_scalar = dtype_to_c_type(dtype)
    decl = f"void gemv({c_scalar} *y, {c_scalar} *A, {c_scalar} *x)"
    c_code = decl + "{\n" + Q.c_format(L.StatementList(code)) + "\n}\n"

    ffibuilder = FFI()
    ffibuilder.cdef(decl + ";")
    ffibuilder.set_source(f"_gemv_{c_scalar}", c_code)
    ffibuilder.compile(verbose=True)

    _gemv = importlib.import_module(f"_gemv_{c_scalar}")
    gemv = _gemv.lib.gemv
    ffi = _gemv.ffi

    y = np.arange(p, dtype=dtype)
    x = np.arange(q, dtype=dtype)
    A = np.outer(y, x)

    py = ffi.cast(f"{c_scalar} *", y.ctypes.data)
    pA = ffi.cast(f"{c_scalar} *", A.ctypes.data)
    px = ffi.cast(f"{c_scalar} *", x.ctypes.data)

    # Compute expected result
    s2 = q * (q - 1) * (2 * q - 1) // 6 + 1
    result = np.arange(p, dtype=dtype) * s2

    gemv(py, pA, px)
    assert np.all(y == result)
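    # Why this is the expected value: y starts as y_i = i and A[i, j] = i * j,
    # so the kernel accumulates y_i += sum_j (i * j) * j = i * sum_{j<q} j^2,
    # and sum_{j<q} j^2 = q(q - 1)(2q - 1)/6. Hence y_i = i * s2 with
    # s2 = q(q - 1)(2q - 1)/6 + 1.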
ffcx-0.9.0/test/test_signatures.py000066400000000000000000000055471470142666300172440ustar00rootroot00000000000000# Copyright (C) 2024 Igor A. Baratta
#
# This file is part of FFCx. (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later

import sys

import basix.ufl
import cffi
import numpy as np
import pytest
import ufl

import ffcx.codegeneration.jit
import ffcx.codegeneration.utils as utils


def generate_kernel(forms, scalar_type, options):
    """Generate kernel for given forms."""
    compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(
        forms, options={"scalar_type": scalar_type, **options}
    )
    for f, compiled_f in zip(forms, compiled_forms):
        assert compiled_f.rank == len(f.arguments())
    form0 = compiled_forms[0]
    offsets = form0.form_integral_offsets
    cell = module.lib.cell
    assert offsets[cell + 1] - offsets[cell] == 1
    integral_id = form0.form_integral_ids[offsets[cell]]
    assert integral_id == -1

    default_integral = form0.form_integrals[offsets[cell]]
    kernel = getattr(default_integral, f"tabulate_tensor_{scalar_type}")

    return kernel, code, module


@pytest.mark.parametrize(
    "dtype",
    [
        "float32",
        "float64",
        pytest.param(
            "complex64",
            marks=pytest.mark.xfail(
                sys.platform.startswith("win32"),
                raises=NotImplementedError,
                reason="missing _Complex",
            ),
        ),
        pytest.param(
            "complex128",
            marks=pytest.mark.xfail(
                sys.platform.startswith("win32"),
                raises=NotImplementedError,
                reason="missing _Complex",
            ),
        ),
    ],
)
def test_numba_kernel_signature(dtype):
    try:
        import numba
    except ImportError:
        pytest.skip("Numba not installed")

    # Create a simple form
    mesh = ufl.Mesh(basix.ufl.element("P", "triangle", 2, shape=(2,)))
    e = basix.ufl.element("Lagrange", "triangle", 2)
    V = ufl.FunctionSpace(mesh, e)
    u = ufl.TrialFunction(V)
    v = ufl.TestFunction(V)
    a = ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx

    # Generate and compile the kernel
    kernel, code, module = generate_kernel([a], dtype, {})

    # Convert to numpy dtype
    np_dtype = np.dtype(dtype)

    # Generate the Numba signature
    xtype = utils.dtype_to_scalar_dtype(dtype)
    signature = utils.numba_ufcx_kernel_signature(np_dtype, xtype)
    assert isinstance(signature, numba.core.typing.templates.Signature)

    # Get the signature from the compiled kernel
    ffi = cffi.FFI()
    args = ffi.typeof(kernel).args

    # Check that the signature is equivalent to the one in the generated code:
    # same arity, a void return, every argument a pointer, and the expected
    # scalar/geometry base types on the Numba side
    assert len(args) == len(signature.args)
    assert signature.return_type == numba.types.void
    for arg, sig in zip(args, signature.args):
        assert arg.kind == "pointer"
        assert sig.name.endswith("*")
    scalar_name = str(np_dtype)
    geom_name = str(np.dtype(xtype))
    for base, sig in zip([scalar_name] * 3 + [geom_name], signature.args):
        assert sig.name == base + "*"
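

# Hedged usage sketch: a signature produced by numba_ufcx_kernel_signature is
# what one would pass to numba.cfunc to JIT-compile a custom tabulate_tensor
# kernel. The kernel name and body here are illustrative only:
#
#   @numba.cfunc(utils.numba_ufcx_kernel_signature(np.float64, np.float64),
#                nopython=True)
#   def my_kernel(A, w, c, coords, entity_local_index, quadrature_permutation):
#       ...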
ffcx-0.9.0/test/test_submesh.py000066400000000000000000000056451470142666300165270ustar00rootroot00000000000000# Copyright (C) 2024 Jørgen S. Dokken
#
# This file is part of FFCx. (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later

from __future__ import annotations

import sys

import basix.ufl
import numpy as np
import pytest
import ufl

import ffcx.codegeneration.jit
from ffcx.codegeneration.utils import dtype_to_c_type, dtype_to_scalar_dtype


def compute_tensor(forms: list[ufl.form.Form], dtype: str, compile_args: list[str]):
    """Helper function to compute the element matrix for a P1-Lagrange problem."""
    compiled_forms, module, _ = ffcx.codegeneration.jit.compile_forms(
        forms, options={"scalar_type": dtype}, cffi_extra_compile_args=compile_args
    )

    ffi = module.ffi
    form0 = compiled_forms[0]

    offsets = form0.form_integral_offsets
    cell = module.lib.cell
    assert offsets[cell + 1] - offsets[cell] == 1
    integral_id = form0.form_integral_ids[offsets[cell]]
    assert integral_id == -1

    default_integral = form0.form_integrals[offsets[cell]]

    A = np.zeros((3, 3), dtype=dtype)
    w = np.array([], dtype=dtype)
    c = np.array([], dtype=dtype)

    xdtype = dtype_to_scalar_dtype(dtype)
    coords = np.array([[1.0, 2.0, 0.0], [1.5, 2.3, 0.0], [6.0, 1.8, 0.0]], dtype=xdtype)

    c_type, c_xtype = dtype_to_c_type(dtype), dtype_to_c_type(xdtype)
    kernel = getattr(default_integral, f"tabulate_tensor_{dtype}")
    kernel(
        ffi.cast(f"{c_type} *", A.ctypes.data),
        ffi.cast(f"{c_type} *", w.ctypes.data),
        ffi.cast(f"{c_type} *", c.ctypes.data),
        ffi.cast(f"{c_xtype} *", coords.ctypes.data),
        ffi.NULL,
        ffi.NULL,
    )
    return A


@pytest.mark.parametrize(
    "dtype",
    [
        "float64",
        pytest.param(
            "complex128",
            marks=pytest.mark.xfail(
                sys.platform.startswith("win32"),
                raises=NotImplementedError,
                reason="missing _Complex",
            ),
        ),
    ],
)
def test_multiple_mesh_codim0(dtype, compile_args):
    # Define coordinate element and element used in parent and sub-mesh
    element = basix.ufl.element("Lagrange", "triangle", 1)
    coordinate_element = basix.ufl.element("Lagrange", "triangle", 1, shape=(2,))
    domain = ufl.Mesh(coordinate_element)
    space = ufl.FunctionSpace(domain, element)
    u_parent = ufl.TrialFunction(space)

    # Create submesh and function space on submesh
    sub_domain = ufl.Mesh(coordinate_element)
    subspace = ufl.FunctionSpace(sub_domain, element)
    v_sub = ufl.TestFunction(subspace)

    # Mixed-mesh form: trial function on the parent mesh, test function on the submesh
    a = ufl.inner(u_parent.dx(0), v_sub.dx(0)) * ufl.dx(domain=domain)
    A = compute_tensor([a], dtype, compile_args)

    # Compute reference solution with test and trial functions from the same mesh
    v_parent = ufl.TestFunction(space)
    a_org = ufl.inner(u_parent.dx(0), v_parent.dx(0)) * ufl.dx(domain=domain)
    A_org = compute_tensor([a_org], dtype, compile_args)
    np.testing.assert_allclose(A, A_org)
ffcx-0.9.0/test/test_tensor_product.py000066400000000000000000000102301470142666300201160ustar00rootroot00000000000000# Copyright (C) 2023 Igor A. Baratta
#
# This file is part of FFCx. (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later

import basix.ufl
import numpy as np
import pytest
import ufl

import ffcx.codegeneration.jit
from ffcx.codegeneration.utils import dtype_to_c_type, dtype_to_scalar_dtype


def cell_to_gdim(cell_type):
    """Return the geometric dimension of a cell."""
    if cell_type == basix.CellType.quadrilateral:
        return 2
    elif cell_type == basix.CellType.hexahedron:
        return 3
    else:
        raise NotImplementedError


def create_tensor_product_element(cell_type, degree, variant, shape=None):
    """Create a tensor product element."""
    family = basix.ElementFamily.P
    element = basix.create_tp_element(family, cell_type, degree, variant)
    uflelement = basix.ufl.wrap_element(element)
    if shape is None:
        return uflelement
    else:
        return basix.ufl.blocked_element(uflelement, shape=shape)


def generate_kernel(forms, dtype, options):
    """Generate kernel for given forms."""
    # Use a different cache directory for each option set so kernels compiled
    # with and without sum factorization do not collide
    sf = options.get("sum_factorization", False)
    cache_dir = f"./ffcx-cache-{sf}"
    compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms(
        forms, cache_dir=cache_dir, options=options
    )
    for f, compiled_f in zip(forms, compiled_forms):
        assert compiled_f.rank == len(f.arguments())
    form0 = compiled_forms[0]
    offsets = form0.form_integral_offsets
    cell = module.lib.cell
    assert offsets[cell + 1] - offsets[cell] == 1
    integral_id = form0.form_integral_ids[offsets[cell]]
    assert integral_id == -1

    default_integral = form0.form_integrals[offsets[cell]]
    kernel = getattr(default_integral, f"tabulate_tensor_{dtype}")

    return kernel, code, module


@pytest.mark.parametrize("dtype", ["float32", "float64"])
@pytest.mark.parametrize("P", [1, 2, 3])
@pytest.mark.parametrize("cell_type", [basix.CellType.quadrilateral, basix.CellType.hexahedron])
def test_bilinear_form(dtype, P, cell_type):
    gdim = cell_to_gdim(cell_type)
    element = create_tensor_product_element(cell_type, P, basix.LagrangeVariant.gll_warped)
    coord_element = create_tensor_product_element(
        cell_type, 1, basix.LagrangeVariant.gll_warped, shape=(gdim,)
    )
    mesh = ufl.Mesh(coord_element)
    V = ufl.FunctionSpace(mesh, element)
    u, v = ufl.TrialFunction(V), ufl.TestFunction(V)
    a = ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx

    ndofs = element.dim
    A = np.zeros((ndofs, ndofs), dtype=dtype)
    w = np.array([], dtype=dtype)
    c = np.array([], dtype=dtype)

    xdtype = dtype_to_scalar_dtype(dtype)
    if cell_type == basix.CellType.quadrilateral:
        coords = np.array(
            [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 1.0, 0.0]], dtype=xdtype
        )
    elif cell_type == basix.CellType.hexahedron:
        coords = np.array(
            [
                [0.0, 0.0, 0.0],
                [1.0, 0.0, 0.0],
                [0.0, 1.0, 0.0],
                [1.0, 1.0, 0.0],
                [0.0, 0.0, 1.0],
                [1.0, 0.0, 1.0],
                [0.0, 1.0, 1.0],
                [1.0, 1.0, 1.0],
            ],
            dtype=xdtype,
        )

    c_type = dtype_to_c_type(dtype)
    c_xtype = dtype_to_c_type(xdtype)

    kernel, code, module = generate_kernel([a], dtype, options={"scalar_type": dtype})
    ffi = module.ffi

    kernel(
        ffi.cast(f"{c_type} *", A.ctypes.data),
        ffi.cast(f"{c_type} *", w.ctypes.data),
        ffi.cast(f"{c_type} *", c.ctypes.data),
        ffi.cast(f"{c_xtype} *", coords.ctypes.data),
        ffi.NULL,
        ffi.NULL,
    )

    # Use sum factorization
    A1 = np.zeros((ndofs, ndofs), dtype=dtype)
    kernel, code, module = generate_kernel(
        [a], dtype, options={"scalar_type": dtype, "sum_factorization": True}
    )
    ffi = module.ffi
    kernel(
        ffi.cast(f"{c_type} *", A1.ctypes.data),
        ffi.cast(f"{c_type} *", w.ctypes.data),
        ffi.cast(f"{c_type} *", c.ctypes.data),
        ffi.cast(f"{c_xtype} *", coords.ctypes.data),
        ffi.NULL,
        ffi.NULL,
    )
    np.testing.assert_allclose(A, A1, rtol=1e-6, atol=1e-6)
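

# Minimal standalone runner (an illustrative sketch, not part of the pytest
# parametrization); assumes Basix, UFL and FFCx are importable:
if __name__ == "__main__":
    test_bilinear_form("float64", 1, basix.CellType.quadrilateral)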