fiat-2019.2.0~git20210419.7d418fa/.github/workflows/pythonapp.yml

# This workflow will install Python dependencies, run tests and lint
# with a single version of Python. For more information see:
# https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions

name: FIAT CI

on: [push, pull_request]

jobs:
  build:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest]
        python-version: [3.7, 3.8]

    steps:
      - uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: Lint with flake8
        run: |
          pip install flake8
          flake8 --statistics .
      - name: Check documentation style
        run: |
          pip install pydocstyle
          python -m pydocstyle .
      - name: Install FIAT
        run: pip install .
      - name: Test with pytest
        run: |
          pip install coveralls pytest pytest-cov pytest-xdist
          DATA_REPO_GIT="" pytest --cov=FIAT/ test/
      - name: Coveralls
        if: ${{ github.repository == 'FEniCS/fiat' && github.head_ref == '' && matrix.os == 'ubuntu-latest' && matrix.python-version == '3.8' }}
        env:
          COVERALLS_REPO_TOKEN: ${{ secrets.COVERALLS_REPO_TOKEN }}
        run: coveralls
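
A minimal local equivalent of the checks above (a hypothetical helper, not
part of the repository; assumes flake8, pydocstyle, and pytest are installed):

    import subprocess

    # Mirror the CI steps: lint, docstring style check, then the test suite.
    for cmd in (["flake8", "--statistics", "."],
                ["python", "-m", "pydocstyle", "."],
                ["python", "-m", "pytest", "test/"]):
        subprocess.run(cmd, check=True)  # check=True stops at the first failure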
fiat-2019.2.0~git20210419.7d418fa/AUTHORS

Main author:

  Robert C. Kirby
  email: robert.c.kirby@ttu.edu
  www: http://www.math.ttu.edu/~kirby/

Contributors:

  Marie Rognes
  email: meg@simula.no

  Anders Logg
  email: logg@simula.no
  www: http://home.simula.no/~logg/

  Kristian B. Ølgaard
  email: k.b.oelgaard@gmail.com

  Garth N. Wells
  email: gnw20@cam.ac.uk
  www: http://www.eng.cam.ac.uk/~gnw20/

  Andy R. Terrel
  email: aterrel@uchicago.edu

  Andrew T. T. McRae
  email: a.t.t.mcrae@bath.ac.uk

  Jan Blechta
  email: blechta@karlin.mff.cuni.cz

  David A. Ham
  email: david.ham@imperial.ac.uk

  Miklós Homolya
  email: m.homolya14@imperial.ac.uk

  Lawrence Mitchell
  email: lawrence.mitchell@imperial.ac.uk

  Colin Cotter
  email: colin.cotter@imperial.ac.uk

  Thomas H. Gibson
  email: t.gibson15@imperial.ac.uk

  Florian Rathgeber
  email: florian.rathgeber@gmail.com

  Aslak Bergersen
  email: aslak.bergersen@gmail.com

  Nico Schlömer
  email: nico.schloemer@gmail.com

  Johannes Ring
  email: johannr@simula.no

  Matt Knepley
  email: knepley@rice.edu

  Lizao Li
  email: lzlarryli@gmail.com

  Martin Sandve Alnæs
  email: martinal@simula.no

  Matthias Liertzer
  email: matthias@liertzer.at

  Ivan Yashchuk
  email: ivan.yashchuk@aalto.fi

  Cyrus Cheng
  email: cyruscycheng21@gmail.com

  Reuben W. Hill
  email: reuben.hill10@imperial.ac.uk
fiat-2019.2.0~git20210419.7d418fa/CODE_OF_CONDUCT.md

Code of Conduct
===============
Our Pledge
----------
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our
project and our community a harassment-free experience for everyone,
regardless of age, body size, disability, ethnicity, sex
characteristics, gender identity and expression, level of experience,
education, socio-economic status, nationality, personal appearance,
race, religion, or sexual identity and orientation.
Our Standards
-------------
Examples of behavior that contributes to creating a positive environment include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others’ private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting
Our Responsibilities
--------------------
Project maintainers are responsible for clarifying the standards of
acceptable behavior and are expected to take appropriate and fair
corrective action in response to any instances of unacceptable
behavior.
Project maintainers have the right and responsibility to remove, edit,
or reject comments, commits, code, wiki edits, issues, and other
contributions that are not aligned to this Code of Conduct, or to ban
temporarily or permanently any contributor for other behaviors that
they deem inappropriate, threatening, offensive, or harmful.
Scope
-----
This Code of Conduct applies both within project spaces and in public
spaces when an individual is representing the project or its
community. Examples of representing a project or community include
using an official project e-mail address, posting via an official
social media account, or acting as an appointed representative at an
online or offline event. Representation of a project may be further
defined and clarified by project maintainers.
Enforcement
-----------
Instances of abusive, harassing, or otherwise unacceptable behavior
may be reported by contacting the project team at
fenics-steering-council@googlegroups.com. Alternatively, you may
report individually to one of the members of the Steering
Council. Complaints will be reviewed and investigated and will result
in a response that is deemed necessary and appropriate to the
circumstances. The project team is obligated to maintain
confidentiality with regard to the reporter of an incident. Further
details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct
in good faith may face temporary or permanent repercussions as
determined by other members of the project’s leadership.
If you feel that your report has not been followed up satisfactorily,
then you may contact our parent organisation NumFOCUS at
info@numfocus.org for further redress.
Attribution
-----------
This Code of Conduct is adapted from the Contributor Covenant, version
1.4, available at
https://www.contributor-covenant.org/version/1/4/code-of-conduct.html.
Adaptations
-----------
* Allow reporting to individual Steering Council members
* Added the option to contact NumFOCUS for further redress.
For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq

fiat-2019.2.0~git20210419.7d418fa/COPYING
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<http://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<http://www.gnu.org/philosophy/why-not-lgpl.html>.
fiat-2019.2.0~git20210419.7d418fa/COPYING.LESSER

GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
4. Combined Works.
You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.
b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.
fiat-2019.2.0~git20210419.7d418fa/ChangeLog.rst

Changelog
=========
2019.2.0.dev0
-------------
- No changes yet.
2019.1.0 (2019-04-17)
---------------------
- Added an implementation of the Bell finite element (K. Bell 1969
  doi:10.1002/nme.1620010180), with extra basis functions for
  transformation theory from Kirby (2018) doi:10.5802/smai-jcm.33.
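
  A hedged usage sketch of the new element (assuming, as for other Ciarlet
  elements in FIAT, that the constructor takes only the reference triangle)::

      from FIAT import Bell, ufc_simplex

      element = Bell(ufc_simplex(2))    # Bell element on the reference triangle
      print(element.space_dimension())  # number of basis functions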
2018.1.0 (2018-06-14)
---------------------
- Remove Python 2 support
- Generalize ``Bubble`` element to ``CodimBubble`` to create bubbles on
  entities of arbitrary codimension; add ``FacetBubble``, keep ``Bubble``
  (as bubble on cell)
2017.2.0 (2017-12-05)
---------------------
- Add quadrilateral and hexahedron reference cells
- Add quadrilateral and hexahedron elements (with a wrapping class for TensorProductElement)
2017.1.0.post1 (2017-09-12)
---------------------------
- Change PyPI package name to fenics-fiat.
2017.1.0 (2017-05-09)
---------------------
- Extended the discontinuous trace element ``HDivTrace`` to support tensor
  product reference cells. Tabulating the trace defined on a tensor product
  cell relies on the argument ``entity`` to specify a facet of the cell. The
  backwards compatibility case ``entity=None`` does not support tensor product
  tabulation as a result. Tabulating the trace of triangles or tetrahedra
  remains unaffected and works as usual with or without an entity argument.
2016.2.0 (2016-11-30)
---------------------
- Enable Travis CI on GitHub
- Add Firedrake quadrilateral cell
- Add tensor product cell
- Add facet -> cell coordinate transformation
- Add Bubble element
- Add discontinuous Taylor element
- Add broken element and H(div) trace element
- Add element restrictions onto mesh entities
- Add tensor product elements (for tensor product cells)
- Add H(div) and H(curl) element-modifiers for TPEs
- Add enriched element, i.e. sum of elements (e.g. for building Mini)
- Add multidimensional Taylor elements
- Add Gauss Lobatto Legendre elements
- Find non-vanishing DoFs on facets
- Add tensor product quadrature rule
- Make regression tests work again after a few years
- Prune modules having only ``__main__`` code including
  transform_morley, transform_hermite
  (ff86250820e2b18f7a0df471c97afa87207e9a7d)
- Remove newdubiner module (b3b120d40748961fdd0727a4e6c62450198d9647,
  reference removed by cb65a84ac639977b7be04962cc1351481ca66124)
- Switch from homebrew factorial/gamma to math module (wraps C std lib)
2016.1.0 (2016-06-23)
---------------------
- Minor fixes
1.6.0 (2015-07-28)
------------------
- Support DG on facets through the element "Discontinuous Lagrange
  Trace"
1.5.0 (2015-01-12)
------------------
- Require Python 2.7
- Python 3 support
- Remove ScientificPython dependency and add dependency on SymPy
1.4.0 (2014-06-02)
------------------
- Support discontinuous/broken Raviart-Thomas
1.3.0 (2014-01-07)
------------------
- Version bump.
1.1.0 (2013-01-07)
------------------
- Support second kind Nedelecs on tetrahedra for degree >= 2
- Support Brezzi-Douglas-Fortin-Marini elements (of degree 1, 2), again
1.0.0 (2011-12-07)
------------------
- No changes since 1.0-beta, only updating the version number
1.0-beta (2011-08-11)
---------------------
- Change of license to LGPL v3+
- Minor fixes
0.9.9 (2011-02-23)
------------------
- Add ``__version__``
- Add second kind Nedelecs on triangles
0.9.2 (2010-07-01)
------------------
- Bug fix for 1D quadrature
0.9.1 (2010-02-03)
------------------
- Cleanups and small fixes
0.9.0 (2010-02-01)
------------------
- New improved interface with support for arbitrary reference elements
0.3.5
-----
0.3.4
-----
0.3.3
-----
- Bug fix in Nedelec
- Support for ufc element
0.3.1
-----
- Bug fix in DOF orderings for H(div) elements
- Preliminary type system for DOF
- Allow user to change ordering of reference dof
- Brezzi-Douglas-Fortin-Marini elements working
0.3.0
-----
- Small changes to H(div) elements preparing for integration with FFC
- Switch to numpy
- Added primitive testing harness in fiat/testing
0.2.4
-----
- Fixed bug in P0.py
0.2.3
-----
- Updated topology/geometry to allow different orderings of entities
0.2.2
-----
- Added Raviart-Thomas element, verified RT0 against old version of code
- Started work on BDFM, Nedelec (not working)
- Fixed projection, union of sets (error in SVD usage)
- Vector-valued spaces have general number of components
fiat-2019.2.0~git20210419.7d418fa/FIAT/P0.py

# Copyright (C) 2005 The University of Chicago
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
#
# Written by Robert C. Kirby
# Modified by Andrew T. T. McRae (Imperial College London)
#
# This work is partially supported by the US Department of Energy
# under award number DE-FG02-04ER25650

from FIAT import dual_set, functional, polynomial_set, finite_element
import numpy


class P0Dual(dual_set.DualSet):
    def __init__(self, ref_el):
        entity_ids = {}
        nodes = []
        vs = numpy.array(ref_el.get_vertices())
        if ref_el.get_dimension() == 0:
            bary = ()
        else:
            bary = tuple(numpy.average(vs, 0))

        nodes = [functional.PointEvaluation(ref_el, bary)]
        entity_ids = {}
        top = ref_el.get_topology()
        for dim in sorted(top):
            entity_ids[dim] = {}
            for entity in sorted(top[dim]):
                entity_ids[dim][entity] = []
        entity_ids[dim] = {0: [0]}

        super(P0Dual, self).__init__(nodes, ref_el, entity_ids)


class P0(finite_element.CiarletElement):
    def __init__(self, ref_el):
        poly_set = polynomial_set.ONPolynomialSet(ref_el, 0)
        dual = P0Dual(ref_el)
        degree = 0
        formdegree = ref_el.get_spatial_dimension()  # n-form
        super(P0, self).__init__(poly_set, dual, degree, formdegree)
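
A hedged usage sketch of the element above (P0 and ufc_simplex are re-exported
from the package root, as FIAT/__init__.py below shows; the evaluation point
is arbitrary):

    from FIAT import P0, ufc_simplex

    cell = ufc_simplex(2)             # UFC reference triangle
    element = P0(cell)
    print(element.space_dimension())  # 1: a single constant basis function
    # Tabulate the basis (derivative order 0) at the barycenter:
    vals = element.tabulate(0, [(1.0 / 3.0, 1.0 / 3.0)])
    print(vals[(0, 0)])               # the constant basis function evaluates to 1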
fiat-2019.2.0~git20210419.7d418fa/FIAT/__init__.py

"""FInite element Automatic Tabulator -- supports constructing and
evaluating arbitrary order Lagrange and many other elements.
Simplices in one, two, and three dimensions are supported."""
import pkg_resources
# Import finite element classes
from FIAT.finite_element import FiniteElement, CiarletElement # noqa: F401
from FIAT.argyris import Argyris
from FIAT.bernstein import Bernstein
from FIAT.bell import Bell
from FIAT.argyris import QuinticArgyris
from FIAT.brezzi_douglas_marini import BrezziDouglasMarini
from FIAT.brezzi_douglas_fortin_marini import BrezziDouglasFortinMarini
from FIAT.discontinuous_lagrange import DiscontinuousLagrange
from FIAT.discontinuous_taylor import DiscontinuousTaylor
from FIAT.discontinuous_raviart_thomas import DiscontinuousRaviartThomas
from FIAT.serendipity import Serendipity
from FIAT.discontinuous_pc import DPC
from FIAT.hermite import CubicHermite
from FIAT.lagrange import Lagrange
from FIAT.gauss_lobatto_legendre import GaussLobattoLegendre
from FIAT.gauss_legendre import GaussLegendre
from FIAT.gauss_radau import GaussRadau
from FIAT.morley import Morley
from FIAT.nedelec import Nedelec
from FIAT.nedelec_second_kind import NedelecSecondKind
from FIAT.P0 import P0
from FIAT.raviart_thomas import RaviartThomas
from FIAT.crouzeix_raviart import CrouzeixRaviart
from FIAT.regge import Regge
from FIAT.hellan_herrmann_johnson import HellanHerrmannJohnson
from FIAT.arnold_winther import ArnoldWinther
from FIAT.arnold_winther import ArnoldWintherNC
from FIAT.mardal_tai_winther import MardalTaiWinther
from FIAT.bubble import Bubble, FacetBubble
from FIAT.tensor_product import TensorProductElement
from FIAT.enriched import EnrichedElement
from FIAT.nodal_enriched import NodalEnrichedElement
from FIAT.discontinuous import DiscontinuousElement
from FIAT.hdiv_trace import HDivTrace
from FIAT.mixed import MixedElement # noqa: F401
from FIAT.restricted import RestrictedElement # noqa: F401
from FIAT.quadrature_element import QuadratureElement # noqa: F401
from FIAT.kong_mulder_veldhuizen import KongMulderVeldhuizen # noqa: F401
# Important functionality
from FIAT.quadrature import make_quadrature # noqa: F401
from FIAT.quadrature_schemes import create_quadrature # noqa: F401
from FIAT.reference_element import ufc_cell, ufc_simplex # noqa: F401
from FIAT.hdivcurl import Hdiv, Hcurl # noqa: F401
__version__ = pkg_resources.get_distribution("fenics-fiat").version
# List of supported elements and mapping to element classes
supported_elements = {"Argyris": Argyris,
"Bell": Bell,
"Bernstein": Bernstein,
"Brezzi-Douglas-Marini": BrezziDouglasMarini,
"Brezzi-Douglas-Fortin-Marini": BrezziDouglasFortinMarini,
"Bubble": Bubble,
"FacetBubble": FacetBubble,
"Crouzeix-Raviart": CrouzeixRaviart,
"Discontinuous Lagrange": DiscontinuousLagrange,
"S": Serendipity,
"DPC": DPC,
"Discontinuous Taylor": DiscontinuousTaylor,
"Discontinuous Raviart-Thomas": DiscontinuousRaviartThomas,
"Hermite": CubicHermite,
"Lagrange": Lagrange,
"Kong-Mulder-Veldhuizen": KongMulderVeldhuizen,
"Gauss-Lobatto-Legendre": GaussLobattoLegendre,
"Gauss-Legendre": GaussLegendre,
"Gauss-Radau": GaussRadau,
"Morley": Morley,
"Nedelec 1st kind H(curl)": Nedelec,
"Nedelec 2nd kind H(curl)": NedelecSecondKind,
"Raviart-Thomas": RaviartThomas,
"Regge": Regge,
"EnrichedElement": EnrichedElement,
"NodalEnrichedElement": NodalEnrichedElement,
"TensorProductElement": TensorProductElement,
"BrokenElement": DiscontinuousElement,
"HDiv Trace": HDivTrace,
"Hellan-Herrmann-Johnson": HellanHerrmannJohnson,
"Conforming Arnold-Winther": ArnoldWinther,
"Nonconforming Arnold-Winther": ArnoldWintherNC,
"Mardal-Tai-Winther": MardalTaiWinther}
# List of extra elements
extra_elements = {"P0": P0,
"Quintic Argyris": QuinticArgyris}
fiat-2019.2.0~git20210419.7d418fa/FIAT/argyris.py 0000664 0000000 0000000 00000011134 14135323752 0020254 0 ustar 00root root 0000000 0000000 # Copyright (C) 2008 Robert C. Kirby (Texas Tech University)
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from FIAT import finite_element, polynomial_set, dual_set, functional
from FIAT.reference_element import TRIANGLE
class ArgyrisDualSet(dual_set.DualSet):
def __init__(self, ref_el, degree):
entity_ids = {}
nodes = []
cur = 0
top = ref_el.get_topology()
verts = ref_el.get_vertices()
sd = ref_el.get_spatial_dimension()
if ref_el.get_shape() != TRIANGLE:
raise ValueError("Argyris only defined on triangles")
pe = functional.PointEvaluation
pd = functional.PointDerivative
pnd = functional.PointNormalDerivative
# get jet at each vertex
entity_ids[0] = {}
for v in sorted(top[0]):
nodes.append(pe(ref_el, verts[v]))
# first derivatives
for i in range(sd):
alpha = [0] * sd
alpha[i] = 1
nodes.append(pd(ref_el, verts[v], alpha))
# second derivatives
alphas = [[2, 0], [1, 1], [0, 2]]
for alpha in alphas:
nodes.append(pd(ref_el, verts[v], alpha))
entity_ids[0][v] = list(range(cur, cur + 6))
cur += 6
# edge dof
entity_ids[1] = {}
for e in sorted(top[1]):
# normal derivatives at degree - 4 points on each edge
ndpts = ref_el.make_points(1, e, degree - 3)
ndnds = [pnd(ref_el, e, pt) for pt in ndpts]
nodes.extend(ndnds)
entity_ids[1][e] = list(range(cur, cur + len(ndpts)))
cur += len(ndpts)
# point value at degree-5 points on each edge
if degree > 5:
ptvalpts = ref_el.make_points(1, e, degree - 4)
ptvalnds = [pe(ref_el, pt) for pt in ptvalpts]
nodes.extend(ptvalnds)
entity_ids[1][e] += list(range(cur, cur + len(ptvalpts)))
cur += len(ptvalpts)
# internal dof
entity_ids[2] = {}
if degree > 5:
internalpts = ref_el.make_points(2, 0, degree - 3)
internalnds = [pe(ref_el, pt) for pt in internalpts]
nodes.extend(internalnds)
entity_ids[2][0] = list(range(cur, cur + len(internalpts)))
cur += len(internalpts)
else:
entity_ids[2] = {0: []}
super(ArgyrisDualSet, self).__init__(nodes, ref_el, entity_ids)
class QuinticArgyrisDualSet(dual_set.DualSet):
def __init__(self, ref_el):
entity_ids = {}
nodes = []
cur = 0
# make nodes by getting points
# need to do this dimension-by-dimension, facet-by-facet
top = ref_el.get_topology()
verts = ref_el.get_vertices()
sd = ref_el.get_spatial_dimension()
if ref_el.get_shape() != TRIANGLE:
raise ValueError("Argyris only defined on triangles")
pd = functional.PointDerivative
# get jet at each vertex
entity_ids[0] = {}
for v in sorted(top[0]):
nodes.append(functional.PointEvaluation(ref_el, verts[v]))
# first derivatives
for i in range(sd):
alpha = [0] * sd
alpha[i] = 1
nodes.append(pd(ref_el, verts[v], alpha))
# second derivatives
alphas = [[2, 0], [1, 1], [0, 2]]
for alpha in alphas:
nodes.append(pd(ref_el, verts[v], alpha))
entity_ids[0][v] = list(range(cur, cur + 6))
cur += 6
# edge dof -- normal at each edge midpoint
entity_ids[1] = {}
for e in sorted(top[1]):
pt = ref_el.make_points(1, e, 2)[0]
n = functional.PointNormalDerivative(ref_el, e, pt)
nodes.append(n)
entity_ids[1][e] = [cur]
cur += 1
entity_ids[2] = {0: []}
super(QuinticArgyrisDualSet, self).__init__(nodes, ref_el, entity_ids)
class Argyris(finite_element.CiarletElement):
"""The Argyris finite element."""
def __init__(self, ref_el, degree):
poly_set = polynomial_set.ONPolynomialSet(ref_el, degree)
dual = ArgyrisDualSet(ref_el, degree)
super(Argyris, self).__init__(poly_set, dual, degree)
class QuinticArgyris(finite_element.CiarletElement):
"""The Argyris finite element."""
def __init__(self, ref_el):
poly_set = polynomial_set.ONPolynomialSet(ref_el, 5)
dual = QuinticArgyrisDualSet(ref_el)
super().__init__(poly_set, dual, 5)
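A sanity-check sketch (assuming ufc_simplex from FIAT.reference_element): the quintic Argyris element carries a 6-dof second-order jet at each vertex plus one normal derivative per edge, 21 dofs in total, matching dim P5 on a triangle.

from FIAT import Argyris, ufc_simplex

element = Argyris(ufc_simplex(2), 5)
assert element.space_dimension() == 21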
fiat-2019.2.0~git20210419.7d418fa/FIAT/arnold_winther.py 0000664 0000000 0000000 00000014004 14135323752 0021612 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
"""Implementation of the Arnold-Winther finite elements."""
# Copyright (C) 2020 by Robert C. Kirby (Baylor University)
# Modified by Francis Aznaran (Oxford University)
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from FIAT.finite_element import CiarletElement
from FIAT.dual_set import DualSet
from FIAT.polynomial_set import ONSymTensorPolynomialSet, ONPolynomialSet
from FIAT.functional import (
PointwiseInnerProductEvaluation as InnerProduct,
FrobeniusIntegralMoment as FIM,
IntegralMomentOfTensorDivergence,
IntegralLegendreNormalNormalMoment,
IntegralLegendreNormalTangentialMoment)
from FIAT.quadrature import make_quadrature
import numpy
class ArnoldWintherNCDual(DualSet):
def __init__(self, cell, degree):
if not degree == 2:
raise ValueError("Nonconforming Arnold-Winther elements are"
"only defined for degree 2.")
dofs = []
dof_ids = {}
dof_ids[0] = {0: [], 1: [], 2: []}
dof_ids[1] = {0: [], 1: [], 2: []}
dof_ids[2] = {0: []}
dof_cur = 0
# no vertex dofs
        # proper edge dofs now (not the constraints)
# moments of normal . sigma against constants and linears.
for entity_id in range(3): # a triangle has 3 edges
for order in (0, 1):
dofs += [IntegralLegendreNormalNormalMoment(cell, entity_id, order, 6),
IntegralLegendreNormalTangentialMoment(cell, entity_id, order, 6)]
dof_ids[1][entity_id] = list(range(dof_cur, dof_cur+4))
dof_cur += 4
# internal dofs: constant moments of three unique components
Q = make_quadrature(cell, 2)
e1 = numpy.array([1.0, 0.0]) # euclidean basis 1
e2 = numpy.array([0.0, 1.0]) # euclidean basis 2
basis = [(e1, e1), (e1, e2), (e2, e2)] # basis for symmetric matrices
for (v1, v2) in basis:
v1v2t = numpy.outer(v1, v2)
fatqp = numpy.zeros((2, 2, len(Q.pts)))
for i, y in enumerate(v1v2t):
for j, x in enumerate(y):
for k in range(len(Q.pts)):
fatqp[i, j, k] = x
dofs.append(FIM(cell, Q, fatqp))
dof_ids[2][0] = list(range(dof_cur, dof_cur + 3))
dof_cur += 3
# put the constraint dofs last.
for entity_id in range(3):
dof = IntegralLegendreNormalNormalMoment(cell, entity_id, 2, 6)
dofs.append(dof)
dof_ids[1][entity_id].append(dof_cur)
dof_cur += 1
super(ArnoldWintherNCDual, self).__init__(dofs, cell, dof_ids)
class ArnoldWintherNC(CiarletElement):
"""The definition of the nonconforming Arnold-Winther element.
"""
def __init__(self, cell, degree):
assert degree == 2, "Only defined for degree 2"
Ps = ONSymTensorPolynomialSet(cell, degree)
Ls = ArnoldWintherNCDual(cell, degree)
mapping = "double contravariant piola"
super(ArnoldWintherNC, self).__init__(Ps, Ls, degree,
mapping=mapping)
class ArnoldWintherDual(DualSet):
def __init__(self, cell, degree):
if not degree == 3:
raise ValueError("Arnold-Winther elements are"
"only defined for degree 3.")
dofs = []
dof_ids = {}
dof_ids[0] = {0: [], 1: [], 2: []}
dof_ids[1] = {0: [], 1: [], 2: []}
dof_ids[2] = {0: []}
dof_cur = 0
# vertex dofs
vs = cell.get_vertices()
e1 = numpy.array([1.0, 0.0])
e2 = numpy.array([0.0, 1.0])
basis = [(e1, e1), (e1, e2), (e2, e2)]
dof_cur = 0
for entity_id in range(3):
node = tuple(vs[entity_id])
for (v1, v2) in basis:
dofs.append(InnerProduct(cell, v1, v2, node))
dof_ids[0][entity_id] = list(range(dof_cur, dof_cur + 3))
dof_cur += 3
# edge dofs now
# moments of normal . sigma against constants and linears.
for entity_id in range(3):
for order in (0, 1):
dofs += [IntegralLegendreNormalNormalMoment(cell, entity_id, order, 6),
IntegralLegendreNormalTangentialMoment(cell, entity_id, order, 6)]
dof_ids[1][entity_id] = list(range(dof_cur, dof_cur+4))
dof_cur += 4
# internal dofs: constant moments of three unique components
Q = make_quadrature(cell, 3)
e1 = numpy.array([1.0, 0.0]) # euclidean basis 1
e2 = numpy.array([0.0, 1.0]) # euclidean basis 2
basis = [(e1, e1), (e1, e2), (e2, e2)] # basis for symmetric matrices
for (v1, v2) in basis:
v1v2t = numpy.outer(v1, v2)
fatqp = numpy.zeros((2, 2, len(Q.pts)))
for k in range(len(Q.pts)):
fatqp[:, :, k] = v1v2t
dofs.append(FIM(cell, Q, fatqp))
dof_ids[2][0] = list(range(dof_cur, dof_cur + 3))
dof_cur += 3
# Constraint dofs
Q = make_quadrature(cell, 5)
onp = ONPolynomialSet(cell, 2, (2,))
pts = Q.get_points()
onpvals = onp.tabulate(pts)[0, 0]
for i in list(range(3, 6)) + list(range(9, 12)):
dofs.append(IntegralMomentOfTensorDivergence(cell, Q,
onpvals[i, :, :]))
dof_ids[2][0] += list(range(dof_cur, dof_cur+6))
super(ArnoldWintherDual, self).__init__(dofs, cell, dof_ids)
class ArnoldWinther(CiarletElement):
"""The definition of the conforming Arnold-Winther element.
"""
def __init__(self, cell, degree):
assert degree == 3, "Only defined for degree 3"
Ps = ONSymTensorPolynomialSet(cell, degree)
Ls = ArnoldWintherDual(cell, degree)
mapping = "double contravariant piola"
super(ArnoldWinther, self).__init__(Ps, Ls, degree, mapping=mapping)
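A construction sketch (assuming the standard CiarletElement accessors): both elements produce symmetric 2x2-tensor-valued functions and are pushed forward with the double contravariant Piola map declared above.

from FIAT import ArnoldWinther, ufc_simplex

element = ArnoldWinther(ufc_simplex(2), 3)
assert element.value_shape() == (2, 2)
assert element.mapping()[0] == "double contravariant piola"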
fiat-2019.2.0~git20210419.7d418fa/FIAT/barycentric_interpolation.py 0000664 0000000 0000000 00000003105 14135323752 0024047 0 ustar 00root root 0000000 0000000 # Copyright (C) 2021 Pablo D. Brubeck
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
#
# Written by Pablo D. Brubeck (brubeck@protonmail.com), 2021
import numpy
def barycentric_interpolation(xsrc, xdst, order=0):
"""Return tabulations of a 1D Lagrange nodal basis via the second barycentric interpolation formula
See Berrut and Trefethen (2004) https://doi.org/10.1137/S0036144502417715 Eq. (4.2) & (9.4)
:arg xsrc: a :class:`numpy.array` with the nodes defining the Lagrange polynomial basis
:arg xdst: a :class:`numpy.array` with the interpolation points
:arg order: the integer order of differentiation
:returns: dict of tabulations up to the given order (in the same format as :meth:`~.CiarletElement.tabulate`)
"""
# w = barycentric weights
# D = spectral differentiation matrix (D.T : u(xsrc) -> u'(xsrc))
# I = barycentric interpolation matrix (I.T : u(xsrc) -> u(xdst))
D = numpy.add.outer(-xsrc, xsrc)
numpy.fill_diagonal(D, 1.0E0)
w = 1.0E0 / numpy.prod(D, axis=0)
D = numpy.divide.outer(w, w) / D
numpy.fill_diagonal(D, numpy.diag(D) - numpy.sum(D, axis=0))
I = numpy.add.outer(-xsrc, xdst)
idx = numpy.argwhere(numpy.isclose(I, 0.0E0, 1E-14))
I[idx[:, 0], idx[:, 1]] = 1.0E0
I = 1.0E0 / I
I *= w[:, None]
I[:, idx[:, 1]] = 0.0E0
I[idx[:, 0], idx[:, 1]] = 1.0E0
I = (1.0E0 / numpy.sum(I, axis=0)) * I
derivs = {(0,): I}
for k in range(0, order):
derivs[(k+1,)] = numpy.matmul(D, derivs[(k,)])
return derivs
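A hand-checkable sketch of the function above: tabulate the quadratic Lagrange basis on the nodes {-1, 0, 1} at x = 0.5. The expected numbers follow from l0 = x(x-1)/2, l1 = 1 - x**2, l2 = x(x+1)/2.

import numpy
from FIAT.barycentric_interpolation import barycentric_interpolation

xsrc = numpy.array([-1.0, 0.0, 1.0])
xdst = numpy.array([0.5])
tab = barycentric_interpolation(xsrc, xdst, order=1)
assert numpy.allclose(tab[(0,)][:, 0], [-0.125, 0.75, 0.375])
assert numpy.isclose(tab[(1,)][1, 0], -1.0)  # l1'(x) = -2x at x = 0.5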
fiat-2019.2.0~git20210419.7d418fa/FIAT/bell.py 0000664 0000000 0000000 00000004713 14135323752 0017517 0 ustar 00root root 0000000 0000000 # Copyright (C) 2018 Robert C. Kirby
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
# This is not quite Bell, but it has 21 dofs and includes 3 extra constraint
# functionals. The first 18 basis functions are the reference element
# bfs, but the extra three are used in the transformation theory.
from FIAT import finite_element, polynomial_set, dual_set, functional
from FIAT.reference_element import TRIANGLE, ufc_simplex
class BellDualSet(dual_set.DualSet):
def __init__(self, ref_el):
entity_ids = {}
nodes = []
cur = 0
# make nodes by getting points
# need to do this dimension-by-dimension, facet-by-facet
top = ref_el.get_topology()
verts = ref_el.get_vertices()
sd = ref_el.get_spatial_dimension()
if ref_el.get_shape() != TRIANGLE:
raise ValueError("Bell only defined on triangles")
pd = functional.PointDerivative
# get jet at each vertex
entity_ids[0] = {}
for v in sorted(top[0]):
nodes.append(functional.PointEvaluation(ref_el, verts[v]))
# first derivatives
for i in range(sd):
alpha = [0] * sd
alpha[i] = 1
nodes.append(pd(ref_el, verts[v], alpha))
# second derivatives
alphas = [[2, 0], [1, 1], [0, 2]]
for alpha in alphas:
nodes.append(pd(ref_el, verts[v], alpha))
entity_ids[0][v] = list(range(cur, cur + 6))
cur += 6
# we need an edge quadrature rule for the moment
from FIAT.quadrature_schemes import create_quadrature
from FIAT.jacobi import eval_jacobi
rline = ufc_simplex(1)
q1d = create_quadrature(rline, 8)
q1dpts = q1d.get_points()
leg4_at_qpts = eval_jacobi(0, 0, 4, 2.0*q1dpts - 1)
imond = functional.IntegralMomentOfNormalDerivative
entity_ids[1] = {}
for e in sorted(top[1]):
entity_ids[1][e] = [18+e]
nodes.append(imond(ref_el, e, q1d, leg4_at_qpts))
entity_ids[2] = {0: []}
super(BellDualSet, self).__init__(nodes, ref_el, entity_ids)
class Bell(finite_element.CiarletElement):
"""The Bell finite element."""
def __init__(self, ref_el):
poly_set = polynomial_set.ONPolynomialSet(ref_el, 5)
dual = BellDualSet(ref_el)
super(Bell, self).__init__(poly_set, dual, 5)
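A construction sketch (hedged: degree() is assumed to report the embedding degree of the polynomial set): the element is built from quintic polynomials, and the dual set above lists 21 functionals, i.e. 18 true dofs plus the 3 edge constraints.

from FIAT import Bell, ufc_simplex

element = Bell(ufc_simplex(2))
assert element.degree() == 5
assert len(element.dual_basis()) == 21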
fiat-2019.2.0~git20210419.7d418fa/FIAT/bernstein.py 0000664 0000000 0000000 00000014401 14135323752 0020565 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
#
# Copyright (C) 2018 Miklós Homolya
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
import math
import numpy
from FIAT.finite_element import FiniteElement
from FIAT.dual_set import DualSet
from FIAT.polynomial_set import mis
class BernsteinDualSet(DualSet):
"""The dual basis for Bernstein elements."""
def __init__(self, ref_el, degree):
# Initialise data structures
topology = ref_el.get_topology()
entity_ids = {dim: {entity_i: []
for entity_i in entities}
for dim, entities in topology.items()}
# Calculate inverse topology
inverse_topology = {vertices: (dim, entity_i)
for dim, entities in topology.items()
for entity_i, vertices in entities.items()}
# Generate triangular barycentric indices
dim = ref_el.get_spatial_dimension()
kss = mis(dim + 1, degree)
# Fill data structures
nodes = []
for i, ks in enumerate(kss):
vertices, = numpy.nonzero(ks)
entity_dim, entity_i = inverse_topology[tuple(vertices)]
entity_ids[entity_dim][entity_i].append(i)
# Leave nodes unimplemented for now
nodes.append(None)
super(BernsteinDualSet, self).__init__(nodes, ref_el, entity_ids)
class Bernstein(FiniteElement):
"""A finite element with Bernstein polynomials as basis functions."""
def __init__(self, ref_el, degree):
dual = BernsteinDualSet(ref_el, degree)
k = 0 # 0-form
super(Bernstein, self).__init__(ref_el, dual, degree, k)
def degree(self):
"""The degree of the polynomial space."""
return self.get_order()
def value_shape(self):
"""The value shape of the finite element functions."""
return ()
def tabulate(self, order, points, entity=None):
"""Return tabulated values of derivatives up to given order of
basis functions at given points.
:arg order: The maximum order of derivative.
:arg points: An iterable of points.
:arg entity: Optional (dimension, entity number) pair
indicating which topological entity of the
reference element to tabulate on. If ``None``,
default cell-wise tabulation is performed.
"""
# Transform points to reference cell coordinates
ref_el = self.get_reference_element()
if entity is None:
entity = (ref_el.get_spatial_dimension(), 0)
entity_dim, entity_id = entity
entity_transform = ref_el.get_entity_transform(entity_dim, entity_id)
cell_points = list(map(entity_transform, points))
# Construct Cartesian to Barycentric coordinate mapping
vs = numpy.asarray(ref_el.get_vertices())
B2R = numpy.vstack([vs.T, numpy.ones(len(vs))])
R2B = numpy.linalg.inv(B2R)
B = numpy.hstack([cell_points,
numpy.ones((len(cell_points), 1))]).dot(R2B.T)
# Evaluate everything
deg = self.degree()
dim = ref_el.get_spatial_dimension()
raw_result = {(alpha, i): vec
for i, ks in enumerate(mis(dim + 1, deg))
for o in range(order + 1)
for alpha, vec in bernstein_Dx(B, ks, o, R2B).items()}
# Rearrange result
space_dim = self.space_dimension()
dtype = numpy.array(list(raw_result.values())).dtype
result = {alpha: numpy.zeros((space_dim, len(cell_points)), dtype=dtype)
for o in range(order + 1)
for alpha in mis(dim, o)}
for (alpha, i), vec in raw_result.items():
result[alpha][i, :] = vec
return result
def bernstein_db(points, ks, alpha=None):
"""Evaluates Bernstein polynomials or its derivative at barycentric
points.
:arg points: array of points in barycentric coordinates
:arg ks: exponents defining the Bernstein polynomial
:arg alpha: derivative tuple
:returns: array of Bernstein polynomial values at given points.
"""
points = numpy.asarray(points)
ks = numpy.array(tuple(ks))
N, d_1 = points.shape
assert d_1 == len(ks)
if alpha is None:
alpha = numpy.zeros(d_1)
else:
alpha = numpy.array(tuple(alpha))
assert d_1 == len(alpha)
ls = ks - alpha
if any(k < 0 for k in ls):
return numpy.zeros(len(points))
elif all(k == 0 for k in ls):
return numpy.ones(len(points))
else:
# Calculate coefficient
coeff = math.factorial(ks.sum())
for k in ls:
coeff //= math.factorial(k)
return coeff * numpy.prod(points**ls, axis=1)
def bernstein_Dx(points, ks, order, R2B):
"""Evaluates Bernstein polynomials or its derivatives according to
reference coordinates.
:arg points: array of points in BARYCENTRIC COORDINATES
:arg ks: exponents defining the Bernstein polynomial
    :arg order: derivative order (returns all derivatives of this
         specified order)
:arg R2B: linear mapping from reference to barycentric coordinates
:returns: dictionary mapping from derivative tuples to arrays of
Bernstein polynomial values at given points.
"""
points = numpy.asarray(points)
ks = tuple(ks)
N, d_1 = points.shape
assert d_1 == len(ks)
# Collect derivatives according to barycentric coordinates
Db_map = {alpha: bernstein_db(points, ks, alpha)
for alpha in mis(d_1, order)}
# Arrange derivative tensor (barycentric coordinates)
dtype = numpy.array(list(Db_map.values())).dtype
Db_shape = (d_1,) * order
Db_tensor = numpy.empty(Db_shape + (N,), dtype=dtype)
for ds in numpy.ndindex(Db_shape):
alpha = [0] * d_1
for d in ds:
alpha[d] += 1
Db_tensor[ds + (slice(None),)] = Db_map[tuple(alpha)]
# Coordinate transformation: barycentric -> reference
result = {}
for alpha in mis(d_1 - 1, order):
values = Db_tensor
for d, k in enumerate(alpha):
for _ in range(k):
values = R2B[:, d].dot(values)
result[alpha] = values
return result
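A small sketch of the tabulation above: Bernstein basis functions are nonnegative on the cell and sum to one at every point (partition of unity), which is easy to verify numerically.

import numpy
from FIAT import Bernstein, ufc_simplex

element = Bernstein(ufc_simplex(2), 3)
vals = element.tabulate(0, [(0.2, 0.3), (0.1, 0.1)])[(0, 0)]
assert vals.shape == (10, 2)                  # dim P3 on a triangle is 10
assert numpy.allclose(vals.sum(axis=0), 1.0)  # partition of unity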
fiat-2019.2.0~git20210419.7d418fa/FIAT/brezzi_douglas_fortin_marini.py 0000664 0000000 0000000 00000010641 14135323752 0024541 0 ustar 00root root 0000000 0000000 from FIAT import (finite_element, functional, dual_set,
polynomial_set, lagrange)
import numpy
class BDFMDualSet(dual_set.DualSet):
def __init__(self, ref_el, degree):
# Initialize containers for map: mesh_entity -> dof number and
# dual basis
entity_ids = {}
nodes = []
sd = ref_el.get_spatial_dimension()
t = ref_el.get_topology()
# Define each functional for the dual set
# codimension 1 facet normals.
# note this will die for degree greater than 1.
for i in range(len(t[sd - 1])):
pts_cur = ref_el.make_points(sd - 1, i, sd + degree)
for j in range(len(pts_cur)):
pt_cur = pts_cur[j]
f = functional.PointScaledNormalEvaluation(ref_el, i, pt_cur)
nodes.append(f)
# codimension 1 facet tangents.
# because the tangent component is discontinuous, these actually
# count as internal nodes.
tangent_count = 0
for i in range(len(t[sd - 1])):
pts_cur = ref_el.make_points(sd - 1, i, sd + degree - 1)
tangent_count += len(pts_cur)
for j in range(len(pts_cur)):
pt_cur = pts_cur[j]
f = functional.PointEdgeTangentEvaluation(ref_el, i, pt_cur)
nodes.append(f)
# sets vertices (and in 3d, edges) to have no nodes
for i in range(sd - 1):
entity_ids[i] = {}
for j in range(len(t[i])):
entity_ids[i][j] = []
cur = 0
# set codimension 1 (edges 2d, faces 3d) dof
pts_facet_0 = ref_el.make_points(sd - 1, 0, sd + degree)
pts_per_facet = len(pts_facet_0)
entity_ids[sd - 1] = {}
for i in range(len(t[sd - 1])):
entity_ids[sd - 1][i] = list(range(cur, cur + pts_per_facet))
cur += pts_per_facet
# internal nodes
entity_ids[sd] = {0: list(range(cur, cur + tangent_count))}
cur += tangent_count
super(BDFMDualSet, self).__init__(nodes, ref_el, entity_ids)
def BDFMSpace(ref_el, order):
sd = ref_el.get_spatial_dimension()
if sd != 2:
raise Exception("BDFM_k elements only valid for dim 2")
# Note that order will be 2.
# Linear vector valued space. Since the embedding degree of this element
# is 2, this is implemented by taking the quadratic space and selecting
# the linear polynomials.
vec_poly_set = polynomial_set.ONPolynomialSet(ref_el, order, (sd,))
# Linears are the first three polynomials in each dimension.
vec_poly_set = vec_poly_set.take([0, 1, 2, 6, 7, 8])
# Scalar quadratic Lagrange element.
lagrange_ele = lagrange.Lagrange(ref_el, order)
# Select the dofs associated with the edges.
edge_dofs_dict = lagrange_ele.dual.get_entity_ids()[sd - 1]
edge_dofs = numpy.array([(edge, dof)
for edge, dofs in list(edge_dofs_dict.items())
for dof in dofs])
tangent_polys = lagrange_ele.poly_set.take(edge_dofs[:, 1])
new_coeffs = numpy.zeros((tangent_polys.get_num_members(), sd, tangent_polys.coeffs.shape[-1]))
# Outer product of the tangent vectors with the quadratic edge polynomials.
for i, (edge, dof) in enumerate(edge_dofs):
tangent = ref_el.compute_edge_tangent(edge)
new_coeffs[i, :, :] = numpy.outer(tangent, tangent_polys.coeffs[i, :])
bubble_set = polynomial_set.PolynomialSet(ref_el,
order,
order,
vec_poly_set.get_expansion_set(),
new_coeffs,
vec_poly_set.get_dmats())
element_set = polynomial_set.polynomial_set_union_normalized(bubble_set, vec_poly_set)
return element_set
class BrezziDouglasFortinMarini(finite_element.CiarletElement):
"""The BDFM element"""
def __init__(self, ref_el, degree):
if degree != 2:
raise Exception("BDFM_k elements only valid for k == 2")
poly_set = BDFMSpace(ref_el, degree)
dual = BDFMDualSet(ref_el, degree - 1)
formdegree = ref_el.get_spatial_dimension() - 1
super(BrezziDouglasFortinMarini, self).__init__(poly_set, dual, degree, formdegree,
mapping="contravariant piola")
fiat-2019.2.0~git20210419.7d418fa/FIAT/brezzi_douglas_marini.py 0000664 0000000 0000000 00000012253 14135323752 0023161 0 ustar 00root root 0000000 0000000 # Copyright (C) 2008-2012 Robert C. Kirby (Texas Tech University)
# Modified by Andrew T. T. McRae (Imperial College London)
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from FIAT import (finite_element, quadrature, functional, dual_set,
polynomial_set, nedelec)
from FIAT.check_format_variant import check_format_variant
class BDMDualSet(dual_set.DualSet):
def __init__(self, ref_el, degree, variant, quad_deg):
# Initialize containers for map: mesh_entity -> dof number and
# dual basis
entity_ids = {}
nodes = []
sd = ref_el.get_spatial_dimension()
t = ref_el.get_topology()
if variant == "integral":
facet = ref_el.get_facet_element()
# Facet nodes are \int_F v\cdot n p ds where p \in P_{q-1}
# degree is q - 1
Q = quadrature.make_quadrature(facet, quad_deg)
Pq = polynomial_set.ONPolynomialSet(facet, degree)
Pq_at_qpts = Pq.tabulate(Q.get_points())[tuple([0]*(sd - 1))]
for f in range(len(t[sd - 1])):
for i in range(Pq_at_qpts.shape[0]):
phi = Pq_at_qpts[i, :]
nodes.append(functional.IntegralMomentOfScaledNormalEvaluation(ref_el, Q, phi, f))
# internal nodes
if degree > 1:
Q = quadrature.make_quadrature(ref_el, quad_deg)
qpts = Q.get_points()
Nedel = nedelec.Nedelec(ref_el, degree - 1, variant)
Nedfs = Nedel.get_nodal_basis()
zero_index = tuple([0 for i in range(sd)])
Ned_at_qpts = Nedfs.tabulate(qpts)[zero_index]
for i in range(len(Ned_at_qpts)):
phi_cur = Ned_at_qpts[i, :]
l_cur = functional.FrobeniusIntegralMoment(ref_el, Q, phi_cur)
nodes.append(l_cur)
elif variant == "point":
# Define each functional for the dual set
# codimension 1 facets
for i in range(len(t[sd - 1])):
pts_cur = ref_el.make_points(sd - 1, i, sd + degree)
for j in range(len(pts_cur)):
pt_cur = pts_cur[j]
f = functional.PointScaledNormalEvaluation(ref_el, i, pt_cur)
nodes.append(f)
# internal nodes
if degree > 1:
Q = quadrature.make_quadrature(ref_el, 2 * (degree + 1))
qpts = Q.get_points()
Nedel = nedelec.Nedelec(ref_el, degree - 1, variant)
Nedfs = Nedel.get_nodal_basis()
zero_index = tuple([0 for i in range(sd)])
Ned_at_qpts = Nedfs.tabulate(qpts)[zero_index]
for i in range(len(Ned_at_qpts)):
phi_cur = Ned_at_qpts[i, :]
l_cur = functional.FrobeniusIntegralMoment(ref_el, Q, phi_cur)
nodes.append(l_cur)
# sets vertices (and in 3d, edges) to have no nodes
for i in range(sd - 1):
entity_ids[i] = {}
for j in range(len(t[i])):
entity_ids[i][j] = []
cur = 0
# set codimension 1 (edges 2d, faces 3d) dof
pts_facet_0 = ref_el.make_points(sd - 1, 0, sd + degree)
pts_per_facet = len(pts_facet_0)
entity_ids[sd - 1] = {}
for i in range(len(t[sd - 1])):
entity_ids[sd - 1][i] = list(range(cur, cur + pts_per_facet))
cur += pts_per_facet
# internal nodes, if applicable
entity_ids[sd] = {0: []}
if degree > 1:
num_internal_nodes = len(Ned_at_qpts)
entity_ids[sd][0] = list(range(cur, cur + num_internal_nodes))
super(BDMDualSet, self).__init__(nodes, ref_el, entity_ids)
class BrezziDouglasMarini(finite_element.CiarletElement):
"""
The BDM element
:arg ref_el: The reference element.
:arg k: The degree.
:arg variant: optional variant specifying the types of nodes.
variant can be chosen from ["point", "integral", "integral(quadrature_degree)"]
"point" -> dofs are evaluated by point evaluation. Note that this variant has suboptimal
convergence order in the H(div)-norm
"integral" -> dofs are evaluated by quadrature rule. The quadrature degree is chosen to integrate
polynomials of degree 5*k so that most expressions will be interpolated exactly. This is important
when you want to have (nearly) divergence-preserving interpolation.
"integral(quadrature_degree)" -> dofs are evaluated by quadrature rule of degree quadrature_degree
"""
def __init__(self, ref_el, k, variant=None):
(variant, quad_deg) = check_format_variant(variant, k, "BDM")
if k < 1:
raise Exception("BDM_k elements only valid for k >= 1")
sd = ref_el.get_spatial_dimension()
poly_set = polynomial_set.ONPolynomialSet(ref_el, k, (sd, ))
dual = BDMDualSet(ref_el, k, variant, quad_deg)
formdegree = sd - 1 # (n-1)-form
super(BrezziDouglasMarini, self).__init__(poly_set, dual, k, formdegree,
mapping="contravariant piola")
fiat-2019.2.0~git20210419.7d418fa/FIAT/bubble.py 0000664 0000000 0000000 00000002566 14135323752 0020040 0 ustar 00root root 0000000 0000000 # Copyright (C) 2013 Andrew T. T. McRae (Imperial College London)
# Copyright (C) 2015 Jan Blechta
# Copyright (C) 2018 Patrick E. Farrell
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from FIAT.lagrange import Lagrange
from FIAT.restricted import RestrictedElement
from itertools import chain
class CodimBubble(RestrictedElement):
"""Bubbles of a certain codimension."""
def __init__(self, ref_el, degree, codim):
element = Lagrange(ref_el, degree)
cell_dim = ref_el.get_dimension()
assert cell_dim == max(element.entity_dofs().keys())
dofs = list(sorted(chain(*element.entity_dofs()[cell_dim - codim].values())))
if len(dofs) == 0:
raise RuntimeError('Bubble element of degree %d and codimension %d has no dofs' % (degree, codim))
super(CodimBubble, self).__init__(element, indices=dofs)
class Bubble(CodimBubble):
"""The bubble finite element: the dofs of the Lagrange FE in the interior of the cell"""
def __init__(self, ref_el, degree):
super(Bubble, self).__init__(ref_el, degree, codim=0)
class FacetBubble(CodimBubble):
"""The facet bubble finite element: the dofs of the Lagrange FE in the interior of the facets"""
def __init__(self, ref_el, degree):
super(FacetBubble, self).__init__(ref_el, degree, codim=1)
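A sketch of the two restrictions (assuming ufc_simplex): on a triangle the cubic bubble keeps the single interior Lagrange dof, while the quadratic facet bubble keeps one edge-midpoint dof per edge.

from FIAT import Bubble, FacetBubble, ufc_simplex

triangle = ufc_simplex(2)
assert Bubble(triangle, 3).space_dimension() == 1
assert FacetBubble(triangle, 2).space_dimension() == 3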
fiat-2019.2.0~git20210419.7d418fa/FIAT/check_format_variant.py 0000664 0000000 0000000 00000001755 14135323752 0022755 0 ustar 00root root 0000000 0000000 import re
import warnings
def check_format_variant(variant, degree, element):
if variant is None:
variant = "point"
warnings.simplefilter('always', DeprecationWarning)
warnings.warn('Variant of ' + element + ' element will change from point evaluation to integral evaluation.'
' You should project into variant="integral"', DeprecationWarning)
match = re.match(r"^integral(?:\((\d+)\))?$", variant)
if match:
variant = "integral"
quad_degree, = match.groups()
quad_degree = int(quad_degree) if quad_degree is not None else 5*(degree + 1)
if quad_degree < degree + 1:
raise ValueError("Warning, quadrature degree should be at least %s" % (degree + 1))
elif variant == "point":
quad_degree = None
else:
        raise ValueError('Choose either variant="point" or variant="integral"'
                         ' or variant="integral(Quadrature degree)"')
return (variant, quad_degree)
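A sketch of the accepted spellings and the resulting quadrature degrees (the inputs here are illustrative):

from FIAT.check_format_variant import check_format_variant

assert check_format_variant("integral(3)", 2, "BDM") == ("integral", 3)
assert check_format_variant("integral", 2, "BDM") == ("integral", 15)  # 5*(2+1)
assert check_format_variant("point", 2, "BDM") == ("point", None)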
fiat-2019.2.0~git20210419.7d418fa/FIAT/crouzeix_raviart.py 0000664 0000000 0000000 00000004174 14135323752 0022202 0 ustar 00root root 0000000 0000000 # Copyright (C) 2010 Marie E. Rognes
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
#
# Written by Marie E. Rognes based on original
# implementation by Robert C. Kirby.
#
# Last changed: 2010-01-28
from FIAT import finite_element, polynomial_set, dual_set, functional
def _initialize_entity_ids(topology):
entity_ids = {}
for (i, entity) in list(topology.items()):
entity_ids[i] = {}
for j in entity:
entity_ids[i][j] = []
return entity_ids
class CrouzeixRaviartDualSet(dual_set.DualSet):
"""Dual basis for Crouzeix-Raviart element (linears continuous at
boundary midpoints)."""
def __init__(self, cell, degree):
# Get topology dictionary
d = cell.get_spatial_dimension()
topology = cell.get_topology()
# Initialize empty nodes and entity_ids
entity_ids = _initialize_entity_ids(topology)
nodes = [None for i in list(topology[d - 1].keys())]
# Construct nodes and entity_ids
for i in topology[d - 1]:
# Construct midpoint
x = cell.make_points(d - 1, i, d)[0]
# Degree of freedom number i is evaluation at midpoint
nodes[i] = functional.PointEvaluation(cell, x)
entity_ids[d - 1][i] += [i]
# Initialize super-class
super(CrouzeixRaviartDualSet, self).__init__(nodes, cell, entity_ids)
class CrouzeixRaviart(finite_element.CiarletElement):
"""The Crouzeix-Raviart finite element:
K: Triangle/Tetrahedron
Polynomial space: P_1
Dual basis: Evaluation at facet midpoints
"""
def __init__(self, cell, degree):
# Crouzeix Raviart is only defined for polynomial degree == 1
if not (degree == 1):
raise Exception("Crouzeix-Raviart only defined for degree 1")
# Construct polynomial spaces, dual basis and initialize
# FiniteElement
space = polynomial_set.ONPolynomialSet(cell, 1)
dual = CrouzeixRaviartDualSet(cell, 1)
super(CrouzeixRaviart, self).__init__(space, dual, 1)
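A dof-count sketch: there is exactly one point-evaluation dof per facet midpoint, so three on a triangle and four on a tetrahedron.

from FIAT import CrouzeixRaviart, ufc_simplex

assert CrouzeixRaviart(ufc_simplex(2), 1).space_dimension() == 3
assert CrouzeixRaviart(ufc_simplex(3), 1).space_dimension() == 4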
fiat-2019.2.0~git20210419.7d418fa/FIAT/discontinuous.py 0000664 0000000 0000000 00000005661 14135323752 0021512 0 ustar 00root root 0000000 0000000 # Copyright (C) 2014 Andrew T. T. McRae (Imperial College London)
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from FIAT.finite_element import CiarletElement
from FIAT.dual_set import DualSet
class DiscontinuousElement(CiarletElement):
"""A copy of an existing element where all dofs are associated with the cell"""
def __init__(self, element):
self._element = element
new_entity_ids = {}
topology = element.get_reference_element().get_topology()
for dim in sorted(topology):
new_entity_ids[dim] = {}
for ent in sorted(topology[dim]):
new_entity_ids[dim][ent] = []
new_entity_ids[dim][0] = list(range(element.space_dimension()))
# re-initialise the dual, so entity_closure_dofs is recalculated
self.dual = DualSet(element.dual_basis(), element.get_reference_element(), new_entity_ids)
# fully discontinuous
self.formdegree = element.get_reference_element().get_spatial_dimension()
def degree(self):
"Return the degree of the (embedding) polynomial space."
return self._element.degree()
def get_reference_element(self):
"Return the reference element for the finite element."
return self._element.get_reference_element()
def get_nodal_basis(self):
"""Return the nodal basis, encoded as a PolynomialSet object,
for the finite element."""
return self._element.get_nodal_basis()
def get_order(self):
"Return the order of the element (may be different from the degree)"
return self._element.get_order()
def get_coeffs(self):
"""Return the expansion coefficients for the basis of the
finite element."""
return self._element.get_coeffs()
def mapping(self):
"""Return a list of appropriate mappings from the reference
element to a physical element for each basis function of the
finite element."""
return self._element.mapping()
def num_sub_elements(self):
"Return the number of sub-elements."
return self._element.num_sub_elements()
def space_dimension(self):
"Return the dimension of the finite element space."
return self._element.space_dimension()
def tabulate(self, order, points, entity=None):
"""Return tabulated values of derivatives up to given order of
basis functions at given points."""
return self._element.tabulate(order, points, entity)
def value_shape(self):
"Return the value shape of the finite element functions."
return self._element.value_shape()
def dmats(self):
"""Return dmats: expansion coefficients for basis function
derivatives."""
return self._element.dmats()
def get_num_members(self, arg):
"Return number of members of the expansion set."
        return self._element.get_num_members(arg)
fiat-2019.2.0~git20210419.7d418fa/FIAT/discontinuous_lagrange.py 0000664 0000000 0000000 00000004057 14135323752 0023350 0 ustar 00root root 0000000 0000000 # Copyright (C) 2008 Robert C. Kirby (Texas Tech University)
# Modified by Andrew T. T. McRae (Imperial College London)
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from FIAT import finite_element, polynomial_set, dual_set, functional, P0
class DiscontinuousLagrangeDualSet(dual_set.DualSet):
"""The dual basis for Lagrange elements. This class works for
simplices of any dimension. Nodes are point evaluation at
equispaced points. This is the discontinuous version where
all nodes are topologically associated with the cell itself"""
def __init__(self, ref_el, degree):
entity_ids = {}
nodes = []
# make nodes by getting points
# need to do this dimension-by-dimension, facet-by-facet
top = ref_el.get_topology()
cur = 0
for dim in sorted(top):
entity_ids[dim] = {}
for entity in sorted(top[dim]):
pts_cur = ref_el.make_points(dim, entity, degree)
nodes_cur = [functional.PointEvaluation(ref_el, x)
for x in pts_cur]
nnodes_cur = len(nodes_cur)
nodes += nodes_cur
entity_ids[dim][entity] = []
cur += nnodes_cur
entity_ids[dim][0] = list(range(len(nodes)))
super(DiscontinuousLagrangeDualSet, self).__init__(nodes, ref_el, entity_ids)
class HigherOrderDiscontinuousLagrange(finite_element.CiarletElement):
"""The discontinuous Lagrange finite element. It is what it is."""
def __init__(self, ref_el, degree):
poly_set = polynomial_set.ONPolynomialSet(ref_el, degree)
dual = DiscontinuousLagrangeDualSet(ref_el, degree)
formdegree = ref_el.get_spatial_dimension() # n-form
super(HigherOrderDiscontinuousLagrange, self).__init__(poly_set, dual, degree, formdegree)
def DiscontinuousLagrange(ref_el, degree):
if degree == 0:
return P0.P0(ref_el)
else:
return HigherOrderDiscontinuousLagrange(ref_el, degree)
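A dispatch sketch: degree 0 returns the P0 element, and for any degree every dof is associated with the cell interior.

from FIAT import DiscontinuousLagrange, ufc_simplex

element = DiscontinuousLagrange(ufc_simplex(2), 1)
assert element.space_dimension() == 3
assert element.entity_dofs()[2][0] == [0, 1, 2]  # all dofs on the cell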
fiat-2019.2.0~git20210419.7d418fa/FIAT/discontinuous_pc.py 0000664 0000000 0000000 00000011271 14135323752 0022166 0 ustar 00root root 0000000 0000000 # Copyright (C) 2018 Cyrus Cheng (Imperial College London)
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
#
# Modified by David A. Ham (david.ham@imperial.ac.uk), 2018
from FIAT import finite_element, polynomial_set, dual_set, functional
from FIAT.reference_element import (Point,
DefaultLine,
UFCInterval,
UFCQuadrilateral,
UFCHexahedron,
UFCTriangle,
UFCTetrahedron,
make_affine_mapping,
flatten_reference_cube)
from FIAT.P0 import P0Dual
import numpy as np
hypercube_simplex_map = {Point(): Point(),
DefaultLine(): DefaultLine(),
UFCInterval(): UFCInterval(),
UFCQuadrilateral(): UFCTriangle(),
UFCHexahedron(): UFCTetrahedron()}
class DPC0(finite_element.CiarletElement):
def __init__(self, ref_el):
flat_el = flatten_reference_cube(ref_el)
poly_set = polynomial_set.ONPolynomialSet(hypercube_simplex_map[flat_el], 0)
dual = P0Dual(ref_el)
degree = 0
formdegree = ref_el.get_spatial_dimension() # n-form
super(DPC0, self).__init__(poly_set=poly_set,
dual=dual,
order=degree,
ref_el=ref_el,
formdegree=formdegree)
class DPCDualSet(dual_set.DualSet):
"""The dual basis for DPC elements. This class works for
hypercubes of any dimension. Nodes are point evaluation at
equispaced points. This is the discontinuous version where
all nodes are topologically associated with the cell itself"""
def __init__(self, ref_el, flat_el, degree):
entity_ids = {}
nodes = []
# Change coordinates here.
# Vertices of the simplex corresponding to the reference element.
v_simplex = hypercube_simplex_map[flat_el].get_vertices()
# Vertices of the reference element.
v_hypercube = flat_el.get_vertices()
# For the mapping, first two vertices are unchanged in all dimensions.
v_ = [v_hypercube[0], v_hypercube[int(-0.5*len(v_hypercube))]]
# For dimension 1 upwards,
# take the next vertex and map it to the midpoint of the edge/face it belongs to, and shares
# with no other points.
for d in range(1, flat_el.get_dimension()):
v_.append(tuple(np.asarray(v_hypercube[flat_el.get_dimension() - d] +
np.average(np.asarray(v_hypercube[::2]), axis=0))))
A, b = make_affine_mapping(v_simplex, tuple(v_)) # Make affine mapping to be used later.
# make nodes by getting points
# need to do this dimension-by-dimension, facet-by-facet
top = hypercube_simplex_map[flat_el].get_topology()
cur = 0
for dim in sorted(top):
for entity in sorted(top[dim]):
pts_cur = hypercube_simplex_map[flat_el].make_points(dim, entity, degree)
pts_cur = [tuple(np.matmul(A, np.array(x)) + b) for x in pts_cur]
nodes_cur = [functional.PointEvaluation(flat_el, x)
for x in pts_cur]
nnodes_cur = len(nodes_cur)
nodes += nodes_cur
cur += nnodes_cur
cube_topology = ref_el.get_topology()
for dim in sorted(cube_topology):
entity_ids[dim] = {}
for entity in sorted(cube_topology[dim]):
entity_ids[dim][entity] = []
entity_ids[dim][0] = list(range(len(nodes)))
super(DPCDualSet, self).__init__(nodes, ref_el, entity_ids)
class HigherOrderDPC(finite_element.CiarletElement):
"""The DPC finite element. It is what it is."""
def __init__(self, ref_el, degree):
flat_el = flatten_reference_cube(ref_el)
poly_set = polynomial_set.ONPolynomialSet(hypercube_simplex_map[flat_el], degree)
dual = DPCDualSet(ref_el, flat_el, degree)
formdegree = flat_el.get_spatial_dimension() # n-form
super(HigherOrderDPC, self).__init__(poly_set=poly_set,
dual=dual,
order=degree,
ref_el=ref_el,
formdegree=formdegree)
def DPC(ref_el, degree):
if degree == 0:
return DPC0(ref_el)
else:
return HigherOrderDPC(ref_el, degree)
fiat-2019.2.0~git20210419.7d418fa/FIAT/discontinuous_raviart_thomas.py 0000664 0000000 0000000 00000004602 14135323752 0024607 0 ustar 00root root 0000000 0000000 # Copyright (C) 2008-2012 Robert C. Kirby (Texas Tech University)
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
#
# Modified by Jan Blechta 2014
from FIAT import dual_set, finite_element, functional
from FIAT.raviart_thomas import RTSpace
class DRTDualSet(dual_set.DualSet):
"""Dual basis for Raviart-Thomas elements consisting of point
evaluation of normals on facets of codimension 1 and internal
moments against polynomials. This is the discontinuous version
where all nodes are topologically associated with the cell itself"""
def __init__(self, ref_el, degree):
entity_ids = {}
nodes = []
sd = ref_el.get_spatial_dimension()
t = ref_el.get_topology()
# codimension 1 facets
for i in range(len(t[sd - 1])):
pts_cur = ref_el.make_points(sd - 1, i, sd + degree)
for j in range(len(pts_cur)):
pt_cur = pts_cur[j]
f = functional.PointScaledNormalEvaluation(ref_el, i, pt_cur)
nodes.append(f)
# internal nodes. Let's just use points at a lattice
if degree > 0:
cpe = functional.ComponentPointEvaluation
pts = ref_el.make_points(sd, 0, degree + sd)
for d in range(sd):
for i in range(len(pts)):
l_cur = cpe(ref_el, d, (sd,), pts[i])
nodes.append(l_cur)
# sets vertices (and in 3d, edges) to have no nodes
for i in range(sd - 1):
entity_ids[i] = {}
for j in range(len(t[i])):
entity_ids[i][j] = []
# set codimension 1 (edges 2d, faces 3d) to have no dofs
entity_ids[sd - 1] = {}
for i in range(len(t[sd - 1])):
entity_ids[sd - 1][i] = []
# cell dofs
entity_ids[sd] = {0: list(range(len(nodes)))}
super(DRTDualSet, self).__init__(nodes, ref_el, entity_ids)
class DiscontinuousRaviartThomas(finite_element.CiarletElement):
"""The discontinuous Raviart-Thomas finite element"""
def __init__(self, ref_el, q):
degree = q - 1
poly_set = RTSpace(ref_el, degree)
dual = DRTDualSet(ref_el, degree)
super(DiscontinuousRaviartThomas, self).__init__(poly_set, dual, degree,
mapping="contravariant piola")
fiat-2019.2.0~git20210419.7d418fa/FIAT/discontinuous_taylor.py 0000664 0000000 0000000 00000004102 14135323752 0023071 0 ustar 00root root 0000000 0000000 # Copyright (C) 2008 Robert C. Kirby (Texas Tech University)
# Modified by Colin Cotter (Imperial College London)
# David Ham (Imperial College London)
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from FIAT import finite_element, polynomial_set, dual_set, functional, P0, quadrature
from FIAT.polynomial_set import mis
import numpy
class DiscontinuousTaylorDualSet(dual_set.DualSet):
"""The dual basis for Taylor elements. This class works for
intervals. Nodes are function and derivative evaluation
at the midpoint."""
def __init__(self, ref_el, degree):
nodes = []
dim = ref_el.get_spatial_dimension()
Q = quadrature.make_quadrature(ref_el, 2 * (degree + 1))
f_at_qpts = numpy.ones(len(Q.wts))
nodes.append(functional.IntegralMoment(ref_el, Q, f_at_qpts))
vertices = ref_el.get_vertices()
midpoint = tuple(sum(numpy.array(vertices)) / len(vertices))
for k in range(1, degree + 1):
# Loop over all multi-indices of degree k.
for alpha in mis(dim, k):
nodes.append(functional.PointDerivative(ref_el, midpoint, alpha))
entity_ids = {d: {e: [] for e in ref_el.sub_entities[d]}
for d in range(dim + 1)}
entity_ids[dim][0] = list(range(len(nodes)))
super(DiscontinuousTaylorDualSet, self).__init__(nodes, ref_el, entity_ids)
class HigherOrderDiscontinuousTaylor(finite_element.CiarletElement):
"""The discontinuous Taylor finite element. Use a Taylor basis for DG."""
def __init__(self, ref_el, degree):
poly_set = polynomial_set.ONPolynomialSet(ref_el, degree)
dual = DiscontinuousTaylorDualSet(ref_el, degree)
formdegree = ref_el.get_spatial_dimension() # n-form
super(HigherOrderDiscontinuousTaylor, self).__init__(poly_set, dual, degree, formdegree)
def DiscontinuousTaylor(ref_el, degree):
if degree == 0:
return P0.P0(ref_el)
else:
return HigherOrderDiscontinuousTaylor(ref_el, degree)
fiat-2019.2.0~git20210419.7d418fa/FIAT/dual_set.py 0000664 0000000 0000000 00000012234 14135323752 0020376 0 ustar 00root root 0000000 0000000 # Copyright (C) 2008-2012 Robert C. Kirby (Texas Tech University)
#
# Modified 2020 by the same at Baylor University.
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
import numpy
import collections
from FIAT import polynomial_set
class DualSet(object):
def __init__(self, nodes, ref_el, entity_ids):
self.nodes = nodes
self.ref_el = ref_el
self.entity_ids = entity_ids
# Compute the nodes on the closure of each sub_entity.
self.entity_closure_ids = {}
for dim, entities in ref_el.sub_entities.items():
self.entity_closure_ids[dim] = {}
for e, sub_entities in entities.items():
ids = []
for d, se in sub_entities:
ids += self.entity_ids[d][se]
ids.sort()
                self.entity_closure_ids[dim][e] = ids
def get_nodes(self):
return self.nodes
def get_entity_closure_ids(self):
return self.entity_closure_ids
def get_entity_ids(self):
return self.entity_ids
def get_reference_element(self):
return self.ref_el
def to_riesz(self, poly_set):
r"""This method gives the action of the entire dual set
on each member of the expansion set underlying poly_set.
Then, applying the linear functionals of the dual set to an
arbitrary polynomial in poly_set is accomplished by (generalized)
matrix multiplication.
For scalar-valued spaces, this produces a matrix
        :math:`R_{i, j}` such that
        :math:`\ell_i(f) = \sum_{j} a_j \ell_i(\phi_j)`
        for :math:`f = \sum_{j} a_j \phi_j`.
More generally, it will have shape concatenating
the number of functionals in the dual set, the value shape
of functions it takes, and the number of members of the
expansion set.
"""
# This rather technical code queries the low-level information
# in pt_dict and deriv_dict
# for each functional to find out where it evaluates its
# inputs and/or their derivatives. Then, it tabulates the
# expansion set one time for all the function values and
# another for all of the derivatives. This circumvents
# needing to call the to_riesz method of each functional and
# also limits the number of different calls to tabulate.
tshape = self.nodes[0].target_shape
num_nodes = len(self.nodes)
es = poly_set.get_expansion_set()
ed = poly_set.get_embedded_degree()
num_exp = es.get_num_members(poly_set.get_embedded_degree())
riesz_shape = tuple([num_nodes] + list(tshape) + [num_exp])
self.mat = numpy.zeros(riesz_shape, "d")
# Dictionaries mapping pts to which functionals they come from
pts_to_ells = collections.OrderedDict()
dpts_to_ells = collections.OrderedDict()
for i, ell in enumerate(self.nodes):
for pt in ell.pt_dict:
if pt in pts_to_ells:
pts_to_ells[pt].append(i)
else:
pts_to_ells[pt] = [i]
for pt in ell.deriv_dict:
if pt in dpts_to_ells:
dpts_to_ells[pt].append(i)
else:
dpts_to_ells[pt] = [i]
# Now tabulate the function values
pts = list(pts_to_ells.keys())
expansion_values = es.tabulate(ed, pts)
for j, pt in enumerate(pts):
which_ells = pts_to_ells[pt]
for k in which_ells:
pt_dict = self.nodes[k].pt_dict
wc_list = pt_dict[pt]
for i in range(num_exp):
for (w, c) in wc_list:
self.mat[k][c][i] += w*expansion_values[i, j]
# Tabulate the derivative values that are needed
max_deriv_order = max([ell.max_deriv_order for ell in self.nodes])
if max_deriv_order > 0:
dpts = list(dpts_to_ells.keys())
# It's easiest/most efficient to get derivatives of the
# expansion set through the polynomial set interface.
# This is creating a short-lived set to do just this.
expansion = polynomial_set.ONPolynomialSet(self.ref_el, ed)
dexpansion_values = expansion.tabulate(dpts, max_deriv_order)
for j, pt in enumerate(dpts):
which_ells = dpts_to_ells[pt]
for k in which_ells:
dpt_dict = self.nodes[k].deriv_dict
wac_list = dpt_dict[pt]
for i in range(num_exp):
for (w, alpha, c) in wac_list:
self.mat[k][c][i] += w*dexpansion_values[alpha][i, j]
return self.mat
def make_entity_closure_ids(ref_el, entity_ids):
entity_closure_ids = {}
for dim, entities in ref_el.sub_entities.items():
entity_closure_ids[dim] = {}
for e, sub_entities in entities.items():
ids = []
for d, se in sub_entities:
ids += entity_ids[d][se]
ids.sort()
            entity_closure_ids[dim][e] = ids
return entity_closure_ids
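A sketch of what to_riesz produces for a nodal element (hedged: it assumes the usual CiarletElement accessors get_dual_set, get_nodal_basis and get_coeffs): applying the dual nodes to the element's own nodal basis recovers the identity, since ell_i(phi_j) = delta_ij.

import numpy
from FIAT import Lagrange, ufc_simplex

element = Lagrange(ufc_simplex(1), 2)  # quadratic Lagrange on an interval
R = element.get_dual_set().to_riesz(element.get_nodal_basis())
coeffs = element.get_coeffs()          # expansion coefficients of each phi_j
assert numpy.allclose(R.dot(coeffs.T), numpy.eye(3))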
fiat-2019.2.0~git20210419.7d418fa/FIAT/enriched.py 0000664 0000000 0000000 00000011451 14135323752 0020357 0 ustar 00root root 0000000 0000000 # Copyright (C) 2013 Andrew T. T. McRae, 2015-2016 Jan Blechta, and others
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from itertools import chain
import numpy
from FIAT.finite_element import FiniteElement
from FIAT.dual_set import DualSet
from FIAT.mixed import concatenate_entity_dofs
__all__ = ['EnrichedElement']
class EnrichedElement(FiniteElement):
"""Class implementing a finite element that combined the degrees of freedom
of two existing finite elements.
This is an implementation which does not care about orthogonality of
primal and dual basis.
"""
def __init__(self, *elements):
# Firstly, check it makes sense to enrich. Elements must have:
# - same reference element
# - same mapping
# - same value shape
if len(set(e.get_reference_element() for e in elements)) > 1:
raise ValueError("Elements must be defined on the same reference element")
if len(set(m for e in elements for m in e.mapping())) > 1:
raise ValueError("Elements must have same mapping")
if len(set(e.value_shape() for e in elements)) > 1:
raise ValueError("Elements must have the same value shape")
# order is at least max, possibly more, though getting this
# right isn't important AFAIK
order = max(e.get_order() for e in elements)
# form degree is essentially max (not true for Hdiv/Hcurl,
# but this will raise an error above anyway).
# E.g. an H^1 function enriched with an L^2 is now just L^2.
if any(e.get_formdegree() is None for e in elements):
formdegree = None
else:
formdegree = max(e.get_formdegree() for e in elements)
# set up reference element and mapping, following checks above
ref_el, = set(e.get_reference_element() for e in elements)
mapping, = set(m for e in elements for m in e.mapping())
# set up entity_ids - for each geometric entity, just concatenate
# the entities of the constituent elements
entity_ids = concatenate_entity_dofs(ref_el, elements)
# set up dual basis - just concatenation
nodes = list(chain.from_iterable(e.dual_basis() for e in elements))
dual = DualSet(nodes, ref_el, entity_ids)
super(EnrichedElement, self).__init__(ref_el, dual, order, formdegree, mapping)
# required degree (for quadrature) is definitely max
self.polydegree = max(e.degree() for e in elements)
# Store subelements
self._elements = elements
def elements(self):
"Return reference to original subelements"
return self._elements
def degree(self):
"""Return the degree of the (embedding) polynomial space."""
return self.polydegree
def get_nodal_basis(self):
"""Return the nodal basis, encoded as a PolynomialSet object,
for the finite element."""
raise NotImplementedError("get_nodal_basis not implemented")
def get_coeffs(self):
"""Return the expansion coefficients for the basis of the
finite element."""
raise NotImplementedError("get_coeffs not implemented")
def tabulate(self, order, points, entity=None):
"""Return tabulated values of derivatives up to given order of
basis functions at given points."""
num_components = numpy.prod(self.value_shape())
table_shape = (self.space_dimension(), num_components, len(points))
table = {}
irange = slice(0)
for element in self._elements:
etable = element.tabulate(order, points, entity)
irange = slice(irange.stop, irange.stop + element.space_dimension())
# Insert element table into table
for dtuple in etable.keys():
if dtuple not in table:
if num_components == 1:
table[dtuple] = numpy.zeros((self.space_dimension(), len(points)),
dtype=etable[dtuple].dtype)
else:
table[dtuple] = numpy.zeros(table_shape,
dtype=etable[dtuple].dtype)
table[dtuple][irange][:] = etable[dtuple]
return table
def value_shape(self):
"""Return the value shape of the finite element functions."""
result, = set(e.value_shape() for e in self._elements)
return result
def dmats(self):
"""Return dmats: expansion coefficients for basis function
derivatives."""
raise NotImplementedError("dmats not implemented")
def get_num_members(self, arg):
"""Return number of members of the expansion set."""
raise NotImplementedError("get_num_members not implemented")
fiat-2019.2.0~git20210419.7d418fa/FIAT/expansions.py 0000664 0000000 0000000 00000036152 14135323752 0020772 0 ustar 00root root 0000000 0000000 # Copyright (C) 2008 Robert C. Kirby (Texas Tech University)
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
"""Principal orthogonal expansion functions as defined by Karniadakis
and Sherwin. These are parametrized over a reference element so as
to allow users to get coordinates that they want."""
import numpy
import math
import sympy
from FIAT import reference_element
from FIAT import jacobi
def jrc(a, b, n):
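    # Recurrence coefficients (a_n, b_n, c_n) for Jacobi polynomials, used
    # below in the form P_{n+1}(x) = (a_n x + b_n) P_n(x) - c_n P_{n-1}(x)
    # (following the Karniadakis & Sherwin normalization).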
an = (2*n+1+a+b)*(2*n+2+a+b) / (2*(n+1)*(n+1+a+b))
bn = (a*a-b*b) * (2*n+1+a+b) / (2*(n+1)*(2*n+a+b)*(n+1+a+b))
cn = (n+a)*(n+b)*(2*n+2+a+b) / ((n+1)*(n+1+a+b)*(2*n+a+b))
return an, bn, cn
def _tabulate_dpts(tabulator, D, n, order, pts):
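    # Helper: tabulate the basis and all of its derivatives up to `order`
    # at `pts` by differentiating symbolically with sympy and then
    # evaluating the resulting expressions numerically.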
X = sympy.DeferredVector('x')
def form_derivative(F):
'''Forms the derivative recursively, i.e.,
F -> [F_x, F_y, F_z],
[F_x, F_y, F_z] -> [[F_xx, F_xy, F_xz],
[F_yx, F_yy, F_yz],
[F_zx, F_zy, F_zz]]
and so forth.
'''
out = []
try:
out = [sympy.diff(F, X[j]) for j in range(D)]
except (AttributeError, ValueError):
# Intercept errors like
# AttributeError: 'list' object has no attribute
# 'free_symbols'
for f in F:
out.append(form_derivative(f))
return out
def numpy_lambdify(X, F):
'''Unfortunately, SymPy's own lambdify() doesn't work well with
NumPy in that simple functions like
lambda x: 1.0,
when evaluated with NumPy arrays, return just "1.0" instead of
an array of 1s with the same shape as x. This function does that.
'''
try:
lambda_x = [numpy_lambdify(X, f) for f in F]
except TypeError: # 'function' object is not iterable
# SymPy's lambdify also works on functions that return arrays.
# However, use it componentwise here so we can add 0*x to each
# component individually. This is necessary to maintain shapes
# if evaluated with NumPy arrays.
lmbd_tmp = sympy.lambdify(X, F)
lambda_x = lambda x: lmbd_tmp(x) + 0 * x[0]
return lambda_x
def evaluate_lambda(lmbd, x):
'''Properly evaluate lambda expressions recursively for iterables.
'''
try:
values = [evaluate_lambda(l, x) for l in lmbd]
except TypeError: # 'function' object is not iterable
values = lmbd(x)
return values
# Tabulate symbolically
symbolic_tab = tabulator(n, X)
# Make sure that the entries of symbolic_tab are lists so we can
# append derivatives
symbolic_tab = [[phi] for phi in symbolic_tab]
#
data = (order + 1) * [None]
for r in range(order + 1):
shape = [len(symbolic_tab), len(pts)] + r * [D]
data[r] = numpy.empty(shape)
for i, phi in enumerate(symbolic_tab):
# Evaluate the function numerically using lambda expressions
deriv_lambda = numpy_lambdify(X, phi[r])
data[r][i] = \
numpy.array(evaluate_lambda(deriv_lambda, pts.T)).T
# Symbolically compute the next derivative.
# This actually happens once too many here; never mind for
# now.
phi.append(form_derivative(phi[-1]))
return data
def xi_triangle(eta):
"""Maps from [-1,1]^2 to the (-1,1) reference triangle."""
eta1, eta2 = eta
xi1 = 0.5 * (1.0 + eta1) * (1.0 - eta2) - 1.0
xi2 = eta2
return (xi1, xi2)
def xi_tetrahedron(eta):
"""Maps from [-1,1]^3 to the -1/1 reference tetrahedron."""
eta1, eta2, eta3 = eta
xi1 = 0.25 * (1. + eta1) * (1. - eta2) * (1. - eta3) - 1.
xi2 = 0.5 * (1. + eta2) * (1. - eta3) - 1.
xi3 = eta3
return xi1, xi2, xi3
class PointExpansionSet(object):
"""Evaluates the point basis on a point reference element."""
def __init__(self, ref_el):
if ref_el.get_spatial_dimension() != 0:
raise ValueError("Must have a point")
self.ref_el = ref_el
self.base_ref_el = reference_element.Point()
def get_num_members(self, n):
return 1
def tabulate(self, n, pts):
"""Returns a numpy array A[i,j] = phi_i(pts[j]) = 1.0."""
assert n == 0
return numpy.ones((1, len(pts)))
def tabulate_derivatives(self, n, pts):
"""Returns a numpy array of size A where A[i,j] = phi_i(pts[j])
but where each element is an empty tuple (). This maintains
compatibility with the interfaces of the interval, triangle and
tetrahedron expansions."""
deriv_vals = numpy.empty_like(self.tabulate(n, pts), dtype=tuple)
deriv_vals.fill(())
return deriv_vals
class LineExpansionSet(object):
"""Evaluates the Legendre basis on a line reference element."""
def __init__(self, ref_el):
if ref_el.get_spatial_dimension() != 1:
raise Exception("Must have a line")
self.ref_el = ref_el
self.base_ref_el = reference_element.DefaultLine()
v1 = ref_el.get_vertices()
v2 = self.base_ref_el.get_vertices()
self.A, self.b = reference_element.make_affine_mapping(v1, v2)
self.mapping = lambda x: numpy.dot(self.A, x) + self.b
self.scale = numpy.sqrt(numpy.linalg.det(self.A))
def get_num_members(self, n):
return n + 1
def tabulate(self, n, pts):
"""Returns a numpy array A[i,j] = phi_i(pts[j])"""
if len(pts) > 0:
ref_pts = numpy.array([self.mapping(pt) for pt in pts])
psitilde_as = jacobi.eval_jacobi_batch(0, 0, n, ref_pts)
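# The factor sqrt(k + 0.5) below normalizes the Legendre polynomials
# to unit L2 norm on the [-1, 1] reference line.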
results = numpy.zeros((n + 1, len(pts)), type(pts[0][0]))
for k in range(n + 1):
results[k, :] = psitilde_as[k, :] * math.sqrt(k + 0.5)
return results
else:
return []
def tabulate_derivatives(self, n, pts):
"""Returns a tuple of length one (A,) such that
A[i,j] = D phi_i(pts[j]). The tuple is returned for
compatibility with the interfaces of the triangle and
tetrahedron expansions."""
ref_pts = numpy.array([self.mapping(pt) for pt in pts])
psitilde_as_derivs = jacobi.eval_jacobi_deriv_batch(0, 0, n, ref_pts)
# Jacobi polynomials defined on [-1, 1], first derivatives need scaling
psitilde_as_derivs *= 2.0 / self.ref_el.volume()
results = numpy.zeros((n + 1, len(pts)), "d")
for k in range(0, n + 1):
results[k, :] = psitilde_as_derivs[k, :] * numpy.sqrt(k + 0.5)
vals = self.tabulate(n, pts)
deriv_vals = (results,)
# Create the ordinary data structure.
dv = []
for i in range(vals.shape[0]):
dv.append([])
for j in range(vals.shape[1]):
dv[-1].append((vals[i][j], [deriv_vals[0][i][j]]))
return dv
class TriangleExpansionSet(object):
"""Evaluates the orthonormal Dubiner basis on a triangular
reference element."""
def __init__(self, ref_el):
if ref_el.get_spatial_dimension() != 2:
raise Exception("Must have a triangle")
self.ref_el = ref_el
self.base_ref_el = reference_element.DefaultTriangle()
v1 = ref_el.get_vertices()
v2 = self.base_ref_el.get_vertices()
self.A, self.b = reference_element.make_affine_mapping(v1, v2)
self.mapping = lambda x: numpy.dot(self.A, x) + self.b
# self.scale = numpy.sqrt(numpy.linalg.det(self.A))
def get_num_members(self, n):
return (n + 1) * (n + 2) // 2
def tabulate(self, n, pts):
if len(pts) == 0:
return numpy.array([])
else:
return numpy.array(self._tabulate(n, numpy.array(pts).T))
def _tabulate(self, n, pts):
'''A version of tabulate() that also works for a single point.
'''
m1, m2 = self.A.shape
ref_pts = [sum(self.A[i][j] * pts[j] for j in range(m2)) + self.b[i]
for i in range(m1)]
def idx(p, q):
return (p + q) * (p + q + 1) // 2 + q
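# idx enumerates the (p, q) pairs in total-degree order:
# idx(0,0)=0, idx(1,0)=1, idx(0,1)=2, idx(2,0)=3, idx(1,1)=4, idx(0,2)=5, ...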
results = ((n + 1) * (n + 2) // 2) * [None]
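# Adding and subtracting the coordinates leaves the value 1.0 unchanged,
# but broadcasts it to the same array shape as the input points.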
results[0] = 1.0 \
+ pts[0] - pts[0] \
+ pts[1] - pts[1]
if n == 0:
return results
x = ref_pts[0]
y = ref_pts[1]
f1 = (1.0 + 2 * x + y) / 2.0
f2 = (1.0 - y) / 2.0
f3 = f2**2
results[idx(1, 0)] = f1
for p in range(1, n):
a = (2.0 * p + 1) / (1.0 + p)
# b = p / (p+1.0)
results[idx(p+1, 0)] = a * f1 * results[idx(p, 0)] \
- p/(1.0+p) * f3 * results[idx(p-1, 0)]
for p in range(n):
results[idx(p, 1)] = 0.5 * (1+2.0*p+(3.0+2.0*p)*y) \
* results[idx(p, 0)]
for p in range(n - 1):
for q in range(1, n - p):
(a1, a2, a3) = jrc(2 * p + 1, 0, q)
results[idx(p, q+1)] = \
(a1 * y + a2) * results[idx(p, q)] \
- a3 * results[idx(p, q-1)]
for p in range(n + 1):
for q in range(n - p + 1):
results[idx(p, q)] *= math.sqrt((p + 0.5) * (p + q + 1.0))
return results
# return self.scale * results
def tabulate_derivatives(self, n, pts):
order = 1
data = _tabulate_dpts(self._tabulate, 2, n, order, numpy.array(pts))
# Put data in the required data structure, i.e.,
# k-tuples which contain the value, and the k-1 derivatives
# (gradient, Hessian, ...)
m = data[0].shape[0]
n = data[0].shape[1]
data2 = [[tuple([data[r][i][j] for r in range(order+1)])
for j in range(n)]
for i in range(m)]
return data2
def tabulate_jet(self, n, pts, order=1):
return _tabulate_dpts(self._tabulate, 2, n, order, numpy.array(pts))
class TetrahedronExpansionSet(object):
"""Collapsed orthonormal polynomial expanion on a tetrahedron."""
def __init__(self, ref_el):
if ref_el.get_spatial_dimension() != 3:
raise Exception("Must be a tetrahedron")
self.ref_el = ref_el
self.base_ref_el = reference_element.DefaultTetrahedron()
v1 = ref_el.get_vertices()
v2 = self.base_ref_el.get_vertices()
self.A, self.b = reference_element.make_affine_mapping(v1, v2)
self.mapping = lambda x: numpy.dot(self.A, x) + self.b
self.scale = numpy.sqrt(numpy.linalg.det(self.A))
def get_num_members(self, n):
return (n + 1) * (n + 2) * (n + 3) // 6
def tabulate(self, n, pts):
if len(pts) == 0:
return numpy.array([])
else:
return numpy.array(self._tabulate(n, numpy.array(pts).T))
def _tabulate(self, n, pts):
'''A version of tabulate() that also works for a single point.
'''
m1, m2 = self.A.shape
ref_pts = [sum(self.A[i][j] * pts[j] for j in range(m2)) + self.b[i]
for i in range(m1)]
def idx(p, q, r):
return (p + q + r)*(p + q + r + 1)*(p + q + r + 2)//6 + (q + r)*(q + r + 1)//2 + r
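# idx enumerates the (p, q, r) triples in total-degree order:
# idx(0,0,0)=0, idx(1,0,0)=1, idx(0,1,0)=2, idx(0,0,1)=3, idx(2,0,0)=4, ...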
results = ((n + 1) * (n + 2) * (n + 3) // 6) * [None]
results[0] = 1.0 \
+ pts[0] - pts[0] \
+ pts[1] - pts[1] \
+ pts[2] - pts[2]
if n == 0:
return results
x = ref_pts[0]
y = ref_pts[1]
z = ref_pts[2]
factor1 = 0.5 * (2.0 + 2.0 * x + y + z)
factor2 = (0.5 * (y + z))**2
factor3 = 0.5 * (1 + 2.0 * y + z)
factor4 = 0.5 * (1 - z)
factor5 = factor4**2
results[idx(1, 0, 0)] = factor1
for p in range(1, n):
a1 = (2.0 * p + 1.0) / (p + 1.0)
a2 = p / (p + 1.0)
results[idx(p+1, 0, 0)] = a1 * factor1 * results[idx(p, 0, 0)] \
- a2 * factor2 * results[idx(p-1, 0, 0)]
# q = 1
for p in range(0, n):
results[idx(p, 1, 0)] = results[idx(p, 0, 0)] \
* (p * (1.0 + y) + (2.0 + 3.0 * y + z) / 2)
for p in range(0, n - 1):
for q in range(1, n - p):
(aq, bq, cq) = jrc(2 * p + 1, 0, q)
qmcoeff = aq * factor3 + bq * factor4
qm1coeff = cq * factor5
results[idx(p, q+1, 0)] = qmcoeff * results[idx(p, q, 0)] \
- qm1coeff * results[idx(p, q-1, 0)]
# now handle r=1
for p in range(n):
for q in range(n - p):
results[idx(p, q, 1)] = results[idx(p, q, 0)] \
* (1.0 + p + q + (2.0 + q + p) * z)
# general r by recurrence
for p in range(n - 1):
for q in range(0, n - p - 1):
for r in range(1, n - p - q):
ar, br, cr = jrc(2 * p + 2 * q + 2, 0, r)
results[idx(p, q, r+1)] = \
(ar * z + br) * results[idx(p, q, r)] \
- cr * results[idx(p, q, r-1)]
for p in range(n + 1):
for q in range(n - p + 1):
for r in range(n - p - q + 1):
results[idx(p, q, r)] *= \
math.sqrt((p+0.5)*(p+q+1.0)*(p+q+r+1.5))
return results
def tabulate_derivatives(self, n, pts):
order = 1
D = 3
data = _tabulate_dpts(self._tabulate, D, n, order, numpy.array(pts))
# Put data in the required data structure, i.e.,
# k-tuples which contain the value, and the k-1 derivatives
# (gradient, Hessian, ...)
m = data[0].shape[0]
n = data[0].shape[1]
data2 = [[tuple([data[r][i][j] for r in range(order + 1)])
for j in range(n)]
for i in range(m)]
return data2
def tabulate_jet(self, n, pts, order=1):
return _tabulate_dpts(self._tabulate, 3, n, order, numpy.array(pts))
def get_expansion_set(ref_el):
"""Returns an ExpansionSet instance appopriate for the given
reference element."""
if ref_el.get_shape() == reference_element.POINT:
return PointExpansionSet(ref_el)
elif ref_el.get_shape() == reference_element.LINE:
return LineExpansionSet(ref_el)
elif ref_el.get_shape() == reference_element.TRIANGLE:
return TriangleExpansionSet(ref_el)
elif ref_el.get_shape() == reference_element.TETRAHEDRON:
return TetrahedronExpansionSet(ref_el)
else:
raise Exception("Unknown reference element type.")
def polynomial_dimension(ref_el, degree):
"""Returns the dimension of the space of polynomials of degree no
greater than degree on the reference element."""
if ref_el.get_shape() == reference_element.POINT:
if degree > 0:
raise ValueError("Only degree zero polynomials supported on point elements.")
return 1
elif ref_el.get_shape() == reference_element.LINE:
return max(0, degree + 1)
elif ref_el.get_shape() == reference_element.TRIANGLE:
return max((degree + 1) * (degree + 2) // 2, 0)
elif ref_el.get_shape() == reference_element.TETRAHEDRON:
return max(0, (degree + 1) * (degree + 2) * (degree + 3) // 6)
else:
raise ValueError("Unknown reference element type.")
fiat-2019.2.0~git20210419.7d418fa/FIAT/finite_element.py 0000664 0000000 0000000 00000021206 14135323752 0021564 0 ustar 00root root 0000000 0000000 # Copyright (C) 2008 Robert C. Kirby (Texas Tech University)
# Modified by Andrew T. T. McRae (Imperial College London)
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
#
# Modified by David A. Ham (david.ham@imperial.ac.uk), 2014
# Modified by Thomas H. Gibson (t.gibson15@imperial.ac.uk), 2016
import numpy
from FIAT.polynomial_set import PolynomialSet
from FIAT.quadrature_schemes import create_quadrature
class FiniteElement(object):
"""Class implementing a basic abstraction template for general
finite element families. Finite elements which inherit from
this class are non-nodal unless they are CiarletElement subclasses.
"""
def __init__(self, ref_el, dual, order, formdegree=None, mapping="affine"):
# Relevant attributes that do not necessarily depend on a PolynomialSet object:
# The order (degree) of the polynomial basis
self.order = order
self.formdegree = formdegree
# The reference element and the appropriate dual
self.ref_el = ref_el
self.dual = dual
# The appropriate mapping for the finite element space
self._mapping = mapping
def get_reference_element(self):
"""Return the reference element for the finite element."""
return self.ref_el
def get_dual_set(self):
"""Return the dual for the finite element."""
return self.dual
def get_order(self):
"""Return the order of the element (may be different from the degree)."""
return self.order
def dual_basis(self):
"""Return the dual basis (list of functionals) for the finite
element."""
return self.dual.get_nodes()
def entity_dofs(self):
"""Return the map of topological entities to degrees of
freedom for the finite element."""
return self.dual.get_entity_ids()
def entity_closure_dofs(self):
"""Return the map of topological entities to degrees of
freedom on the closure of those entities for the finite element."""
return self.dual.get_entity_closure_ids()
def get_formdegree(self):
"""Return the degree of the associated form (FEEC)"""
return self.formdegree
def mapping(self):
"""Return a list of appropriate mappings from the reference
element to a physical element for each basis function of the
finite element."""
return [self._mapping] * self.space_dimension()
def num_sub_elements(self):
"""Return the number of sub-elements."""
return 1
def space_dimension(self):
"""Return the dimension of the finite element space."""
return len(self.dual_basis())
def tabulate(self, order, points, entity=None):
"""Return tabulated values of derivatives up to given order of
basis functions at given points.
:arg order: The maximum order of derivative.
:arg points: An iterable of points.
:arg entity: Optional (dimension, entity number) pair
indicating which topological entity of the
reference element to tabulate on. If ``None``,
default cell-wise tabulation is performed.
"""
raise NotImplementedError("Must be specified in the element subclass of FiniteElement.")
@staticmethod
def is_nodal():
"""True if primal and dual bases are orthogonal. If false,
dual basis is not implemented or is undefined.
Subclasses are not necessarily nodal unless they derive from CiarletElement.
"""
return False
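def _demo_entity_dofs():  # pragma: no cover
    """A minimal sketch (not part of the original module) of the entity_dofs
    layout, mapping dimension -> entity -> dof numbers.  For quadratic
    Lagrange on a triangle one expects one dof per vertex and one per edge,
    e.g. {0: {0: [0], 1: [1], 2: [2]}, 1: {0: [3], 1: [4], 2: [5]}, 2: {0: []}}."""
    from FIAT.lagrange import Lagrange
    from FIAT.reference_element import ufc_simplex
    return Lagrange(ufc_simplex(2), 2).entity_dofs()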
class CiarletElement(FiniteElement):
"""Class implementing Ciarlet's abstraction of a finite element
being a domain, function space, and set of nodes.
Elements derived from this class are nodal finite elements, with a nodal
basis generated from polynomials encoded in a `PolynomialSet`.
"""
def __init__(self, poly_set, dual, order, formdegree=None, mapping="affine", ref_el=None):
ref_el = ref_el or poly_set.get_reference_element()
super(CiarletElement, self).__init__(ref_el, dual, order, formdegree, mapping)
# build generalized Vandermonde matrix
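# V[i, j] = ell_i(phi_j) for the dual functionals ell_i and the members
# phi_j of poly_set; the new coefficients V^{-T} B computed below then
# yield a nodal basis psi_j satisfying ell_i(psi_j) = delta_ij.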
old_coeffs = poly_set.get_coeffs()
dualmat = dual.to_riesz(poly_set)
shp = dualmat.shape
if len(shp) > 2:
num_cols = numpy.prod(shp[1:])
A = numpy.reshape(dualmat, (dualmat.shape[0], num_cols))
B = numpy.reshape(old_coeffs, (old_coeffs.shape[0], num_cols))
else:
A = dualmat
B = old_coeffs
V = numpy.dot(A, numpy.transpose(B))
self.V = V
Vinv = numpy.linalg.inv(V)
new_coeffs_flat = numpy.dot(numpy.transpose(Vinv), B)
new_shp = tuple([new_coeffs_flat.shape[0]] + list(shp[1:]))
new_coeffs = numpy.reshape(new_coeffs_flat, new_shp)
self.poly_set = PolynomialSet(ref_el,
poly_set.get_degree(),
poly_set.get_embedded_degree(),
poly_set.get_expansion_set(),
new_coeffs,
poly_set.get_dmats())
def degree(self):
"Return the degree of the (embedding) polynomial space."
return self.poly_set.get_embedded_degree()
def get_nodal_basis(self):
"""Return the nodal basis, encoded as a PolynomialSet object,
for the finite element."""
return self.poly_set
def get_coeffs(self):
"""Return the expansion coefficients for the basis of the
finite element."""
return self.poly_set.get_coeffs()
def tabulate(self, order, points, entity=None):
"""Return tabulated values of derivatives up to given order of
basis functions at given points.
:arg order: The maximum order of derivative.
:arg points: An iterable of points.
:arg entity: Optional (dimension, entity number) pair
indicating which topological entity of the
reference element to tabulate on. If ``None``,
default cell-wise tabulation is performed.
"""
if entity is None:
entity = (self.ref_el.get_spatial_dimension(), 0)
entity_dim, entity_id = entity
transform = self.ref_el.get_entity_transform(entity_dim, entity_id)
return self.poly_set.tabulate(list(map(transform, points)), order)
def value_shape(self):
"Return the value shape of the finite element functions."
return self.poly_set.get_shape()
def dmats(self):
"""Return dmats: expansion coefficients for basis function
derivatives."""
return self.get_nodal_basis().get_dmats()
def get_num_members(self, arg):
"Return number of members of the expansion set."
return self.get_nodal_basis().get_expansion_set().get_num_members(arg)
@staticmethod
def is_nodal():
"""True if primal and dual bases are orthogonal. If false,
dual basis is not implemented or is undefined.
All implementations/subclasses are nodal including this one.
"""
return True
def entity_support_dofs(elem, entity_dim):
"""Return the map of entity id to the degrees of freedom for which the
corresponding basis functions take non-zero values
:arg elem: FIAT finite element
:arg entity_dim: Dimension of the cell subentity.
"""
if not hasattr(elem, "_entity_support_dofs"):
elem._entity_support_dofs = {}
cache = elem._entity_support_dofs
try:
return cache[entity_dim]
except KeyError:
pass
ref_el = elem.get_reference_element()
dim = ref_el.get_spatial_dimension()
entity_cell = ref_el.construct_subelement(entity_dim)
quad = create_quadrature(entity_cell, max(2*elem.degree(), 1))
weights = quad.get_weights()
eps = 1.e-8 # Is this a safe value?
result = {}
for f in elem.entity_dofs()[entity_dim].keys():
entity_transform = ref_el.get_entity_transform(entity_dim, f)
points = list(map(entity_transform, quad.get_points()))
# Integrate the square of the basis functions on the facet.
vals = numpy.double(elem.tabulate(0, points)[(0,) * dim])
# Ints contains the square of the basis functions
# integrated over the facet.
if elem.value_shape():
# Vector-valued functions.
ints = numpy.dot(numpy.einsum("...ij,...ij->...j", vals, vals), weights)
else:
ints = numpy.dot(vals**2, weights)
result[f] = [dof for dof, i in enumerate(ints) if i > eps]
cache[entity_dim] = result
return result
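def _demo_entity_support_dofs():  # pragma: no cover
    """A minimal sketch (not part of the original module): for quadratic
    Lagrange on a triangle, the dofs supported on an edge are expected to be
    the two vertex dofs of that edge plus its midpoint dof."""
    from FIAT.lagrange import Lagrange
    from FIAT.reference_element import ufc_simplex
    elem = Lagrange(ufc_simplex(2), 2)
    return entity_support_dofs(elem, 1)  # e.g. {0: [1, 2, 3], ...}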
fiat-2019.2.0~git20210419.7d418fa/FIAT/functional.py 0000664 0000000 0000000 00000072570 14135323752 0020751 0 ustar 00root root 0000000 0000000 # Copyright (C) 2008 Robert C. Kirby (Texas Tech University)
#
# Modified 2020 by the same from Baylor University
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
# functionals require:
# - a degree of accuracy (-1 indicates that it works for all functions
# such as point evaluation)
# - a reference element domain
# - type information
from collections import OrderedDict
from itertools import chain
import numpy
import sympy
from FIAT import polynomial_set
from FIAT.quadrature import GaussLegendreQuadratureLineRule, QuadratureRule
from FIAT.reference_element import UFCInterval as interval
def index_iterator(shp):
"""Constructs a generator iterating over all indices in
shp in generalized column-major order So if shp = (2,2), then we
construct the sequence (0,0),(0,1),(1,0),(1,1)"""
if len(shp) == 0:
return
elif len(shp) == 1:
for i in range(shp[0]):
yield [i]
else:
shp_foo = shp[1:]
for i in range(shp[0]):
for foo in index_iterator(shp_foo):
yield [i] + foo
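# For example, list(index_iterator((2, 2))) == [[0, 0], [0, 1], [1, 0], [1, 1]]
# and list(index_iterator(())) == [].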
class Functional(object):
r"""Abstract class representing a linear functional.
All FIAT functionals are discrete in the sense that
they are written as a weighted sum of (derivatives of components of) their
argument evaluated at particular points.
:arg ref_el: a :class:`Cell`
:arg target_shape: a tuple indicating the value shape of functions on
which the functional operates (e.g. if the function eats 2-vectors
then target_shape is (2,), and if it eats scalars then
target_shape is ())
:arg pt_dict: A dict mapping points to lists of information about
how the functional is evaluated. Each entry in the list takes
the form of a tuple (wt, comp) so that (at least if the
deriv_dict argument is empty), the functional takes the form
:math:`\ell(f) = \sum_{q=1}^{N_q} \sum_{k=1}^{K_q} w^q_k f_{c_k}(x_q)`
where :math:`f_{c_k}` indicates a particular vector or tensor component
:arg deriv_dict: A dict that is similar to `pt_dict`, although the entries
of each list are tuples (wt, alpha, comp) with alpha a tuple
of nonnegative integers corresponding to the order of partial
differentiation in each spatial direction.
:arg functional_type: a string labeling the kind of functional
this is.
"""
def __init__(self, ref_el, target_shape, pt_dict, deriv_dict,
functional_type):
self.ref_el = ref_el
self.target_shape = target_shape
self.pt_dict = pt_dict
self.deriv_dict = deriv_dict
self.functional_type = functional_type
if len(deriv_dict) > 0:
per_point = list(chain(*deriv_dict.values()))
alphas = [tuple(foo[1]) for foo in per_point]
self.max_deriv_order = max([sum(foo) for foo in alphas])
else:
self.max_deriv_order = 0
def evaluate(self, f):
"""Obsolete and broken functional evaluation.
To evaluate the functional, call it on the target function:
functional(function)
"""
raise AttributeError("To evaluate the functional just call it on a function.")
def __call__(self, fn):
raise NotImplementedError("Evaluation is not yet implemented for %s" % type(self))
def get_point_dict(self):
"""Returns the functional information, which is a dictionary
mapping each point in the support of the functional to a list
of pairs containing the weight and component."""
return self.pt_dict
def get_reference_element(self):
"""Returns the reference element."""
return self.ref_el
def get_type_tag(self):
"""Returns the type of function (e.g. point evaluation or
normal component, which is probably handy for clients of FIAT"""
return self.functional_type
def to_riesz(self, poly_set):
r"""Constructs an array representation of the functional so
that the functional may be applied to a function expressed
in terms of the expansion set underlying `poly_set` by means
of contracting coefficients.
That is, `poly_set` will have members all expressed in the
form :math:`p = \sum_{i} \alpha^i \phi_i`
where :math:`\{\phi_i\}_{i}` is some orthonormal expansion set
and :math:`\alpha^i` are coefficients. Note: the orthonormal
expansion set is always scalar-valued but if the members of
`poly_set` are vector or tensor valued the :math:`\alpha^i`
will be scalars or vectors.
This function constructs a tensor :math:`R` such that the
contraction of :math:`R` with the array of coefficients
:math:`\alpha` produces the effect of :math:`\ell(f)`
In the case of scalar-value functions, :math:`R` is just a
vector of the same length as the expansion set, and
:math:`R_i = \ell(\phi_i)`. For vector-valued spaces,
:math:`R_{ij}` will be :math:`\ell(e^i \phi_j)` where
:math:`e^i` is the canonical unit vector nonzero only in one
entry :math:`i`.
"""
es = poly_set.get_expansion_set()
ed = poly_set.get_embedded_degree()
nexp = es.get_num_members(ed)
pt_dict = self.get_point_dict()
pts = list(pt_dict.keys())
npts = len(pts)
bfs = es.tabulate(ed, pts)
result = numpy.zeros(poly_set.coeffs.shape[1:], "d")
# loop over points
for j in range(npts):
pt_cur = pts[j]
wc_list = pt_dict[pt_cur]
# loop over expansion functions
for i in range(nexp):
for (w, c) in wc_list:
result[c][i] += w * bfs[i, j]
if self.deriv_dict:
dpt_dict = self.deriv_dict
# this makes things quicker since it uses dmats after
# instantiation
es_foo = polynomial_set.ONPolynomialSet(self.ref_el, ed)
dpts = list(dpt_dict.keys())
dbfs = es_foo.tabulate(dpts, self.max_deriv_order)
ndpts = len(dpts)
for j in range(ndpts):
dpt_cur = dpts[j]
wac_list = dpt_dict[dpt_cur]
for i in range(nexp):
for (w, alpha, c) in wac_list:
result[c][i] += w * dbfs[tuple(alpha)][i, j]
return result
def tostr(self):
return self.functional_type
class PointEvaluation(Functional):
"""Class representing point evaluation of scalar functions at a
particular point x."""
def __init__(self, ref_el, x):
pt_dict = {x: [(1.0, tuple())]}
Functional.__init__(self, ref_el, tuple(), pt_dict, {}, "PointEval")
def __call__(self, fn):
"""Evaluate the functional on the function fn."""
return fn(tuple(self.pt_dict.keys())[0])
def tostr(self):
x = list(map(str, list(self.pt_dict.keys())[0]))
return "u(%s)" % (','.join(x),)
class ComponentPointEvaluation(Functional):
"""Class representing point evaluation of a particular component
of a vector function at a particular point x."""
def __init__(self, ref_el, comp, shp, x):
if len(shp) != 1:
raise Exception("Illegal shape")
if comp < 0 or comp >= shp[0]:
raise Exception("Illegal component")
self.comp = comp
pt_dict = {x: [(1.0, (comp,))]}
Functional.__init__(self, ref_el, shp, pt_dict, {},
"ComponentPointEval")
def tostr(self):
x = list(map(str, list(self.pt_dict.keys())[0]))
return "(u[%d](%s)" % (self.comp, ','.join(x))
class PointDerivative(Functional):
"""Class representing point partial differentiation of scalar
functions at a particular point x."""
def __init__(self, ref_el, x, alpha):
dpt_dict = {x: [(1.0, tuple(alpha), tuple())]}
self.alpha = tuple(alpha)
self.order = sum(self.alpha)
Functional.__init__(self, ref_el, tuple(), {}, dpt_dict, "PointDeriv")
def __call__(self, fn):
"""Evaluate the functional on the function fn. Note that this depends
on sympy being able to differentiate fn."""
x = list(self.deriv_dict.keys())[0]
X = sympy.DeferredVector('x')
dX = numpy.asarray([X[i] for i in range(len(x))])
dvars = tuple(d for d, a in zip(dX, self.alpha)
for count in range(a))
return sympy.diff(fn(X), *dvars).evalf(subs=dict(zip(dX, x)))
class PointNormalDerivative(Functional):
"""Represents d/dn at a point on a facet."""
def __init__(self, ref_el, facet_no, pt):
n = ref_el.compute_normal(facet_no)
self.n = n
sd = ref_el.get_spatial_dimension()
alphas = []
for i in range(sd):
alpha = [0] * sd
alpha[i] = 1
alphas.append(alpha)
dpt_dict = {pt: [(n[i], tuple(alphas[i]), tuple()) for i in range(sd)]}
Functional.__init__(self, ref_el, tuple(), {}, dpt_dict, "PointNormalDeriv")
class PointNormalSecondDerivative(Functional):
"""Represents d^/dn^2 at a point on a facet."""
def __init__(self, ref_el, facet_no, pt):
n = ref_el.compute_normal(facet_no)
self.n = n
sd = ref_el.get_spatial_dimension()
tau = numpy.zeros((sd*(sd+1)//2,))
alphas = []
cur = 0
for i in range(sd):
for j in range(i, sd):
alpha = [0] * sd
alpha[i] += 1
alpha[j] += 1
alphas.append(tuple(alpha))
tau[cur] = n[i]*n[j]
cur += 1
self.tau = tau
self.alphas = alphas
dpt_dict = {pt: [(tau[i], alphas[i], tuple()) for i in range(len(alphas))]}
Functional.__init__(self, ref_el, tuple(), {}, dpt_dict, "PointNormalDeriv")
class IntegralMoment(Functional):
"""Functional representing integral of the input against some tabulated function f.
:arg ref_el: a :class:`Cell`.
:arg Q: a :class:`QuadratureRule`.
:arg f_at_qpts: an array tabulating the function f at the quadrature
points.
:arg comp: Optional argument indicating that only a particular
component of the input function should be integrated against f
:arg shp: Optional argument giving the value shape of input functions.
"""
def __init__(self, ref_el, Q, f_at_qpts, comp=tuple(), shp=tuple()):
self.Q = Q
qpts, qwts = Q.get_points(), Q.get_weights()
pt_dict = OrderedDict()
self.comp = comp
for i in range(len(qpts)):
pt_cur = tuple(qpts[i])
pt_dict[pt_cur] = [(qwts[i] * f_at_qpts[i], comp)]
Functional.__init__(self, ref_el, shp, pt_dict, {}, "IntegralMoment")
def __call__(self, fn):
"""Evaluate the functional on the function fn."""
pts = list(self.pt_dict.keys())
wts = numpy.array([foo[0][0] for foo in list(self.pt_dict.values())])
result = numpy.dot([fn(p) for p in pts], wts)
if self.comp:
result = result[self.comp]
return result
class IntegralMomentOfNormalDerivative(Functional):
"""Functional giving normal derivative integrated against some function on a facet."""
def __init__(self, ref_el, facet_no, Q, f_at_qpts):
n = ref_el.compute_normal(facet_no)
self.n = n
self.f_at_qpts = f_at_qpts
self.Q = Q
sd = ref_el.get_spatial_dimension()
# map points onto facet
fmap = ref_el.get_entity_transform(sd-1, facet_no)
qpts, qwts = Q.get_points(), Q.get_weights()
dpts = [fmap(pt) for pt in qpts]
self.dpts = dpts
dpt_dict = OrderedDict()
alphas = [tuple([1 if j == i else 0 for j in range(sd)]) for i in range(sd)]
for j, pt in enumerate(dpts):
dpt_dict[tuple(pt)] = [(qwts[j]*n[i]*f_at_qpts[j], alphas[i], tuple()) for i in range(sd)]
Functional.__init__(self, ref_el, tuple(),
{}, dpt_dict, "IntegralMomentOfNormalDerivative")
class IntegralLegendreDirectionalMoment(Functional):
"""Moment of v.s against a Legendre polynomial over an edge"""
def __init__(self, cell, s, entity, mom_deg, comp_deg, nm=""):
sd = cell.get_spatial_dimension()
assert sd == 2
shp = (sd,)
quadpoints = comp_deg + 1
Q = GaussLegendreQuadratureLineRule(interval(), quadpoints)
legendre = numpy.polynomial.legendre.legval(2*Q.get_points()-1, [0]*mom_deg + [1])
f_at_qpts = numpy.array([s*legendre[i] for i in range(quadpoints)])
fmap = cell.get_entity_transform(sd-1, entity)
mappedqpts = [fmap(pt) for pt in Q.get_points()]
mappedQ = QuadratureRule(cell, mappedqpts, Q.get_weights())
qwts = mappedQ.wts
qpts = mappedQ.pts
pt_dict = OrderedDict()
for k in range(len(qpts)):
pt_cur = tuple(qpts[k])
pt_dict[pt_cur] = [(qwts[k] * f_at_qpts[k, i], (i,))
for i in range(2)]
super().__init__(cell, shp, pt_dict, {}, nm)
class IntegralLegendreNormalMoment(IntegralLegendreDirectionalMoment):
"""Moment of v.n against a Legendre polynomial over an edge"""
def __init__(self, cell, entity, mom_deg, comp_deg):
n = cell.compute_scaled_normal(entity)
super().__init__(cell, n, entity, mom_deg, comp_deg,
"IntegralLegendreNormalMoment")
class IntegralLegendreTangentialMoment(IntegralLegendreDirectionalMoment):
"""Moment of v.t against a Legendre polynomial over an edge"""
def __init__(self, cell, entity, mom_deg, comp_deg):
t = cell.compute_edge_tangent(entity)
super().__init__(cell, t, entity, mom_deg, comp_deg,
"IntegralLegendreTangentialMoment")
class IntegralLegendreBidirectionalMoment(Functional):
"""Moment of dot(s1, dot(tau, s2)) against Legendre on entity, multiplied by the size of the reference facet"""
def __init__(self, cell, s1, s2, entity, mom_deg, comp_deg, nm=""):
# mom_deg is the degree of the Legendre moment; comp_deg is the total
# degree of polynomial that must be integrated exactly, which sets the
# quadrature size
sd = cell.get_spatial_dimension()
shp = (sd, sd)
s1s2T = numpy.outer(s1, s2)
quadpoints = comp_deg + 1
Q = GaussLegendreQuadratureLineRule(interval(), quadpoints)
# The squared facet volume accounts both for the Jacobian of the map from
# the reference interval and for the edge-length weighting of the functional.
legendre = numpy.polynomial.legendre.legval(2*Q.get_points()-1, [0]*mom_deg + [1]) * numpy.abs(cell.volume_of_subcomplex(1, entity))**2
f_at_qpts = numpy.array([s1s2T*legendre[i] for i in range(quadpoints)])
# Map the quadrature points
fmap = cell.get_entity_transform(sd-1, entity)
mappedqpts = [fmap(pt) for pt in Q.get_points()]
mappedQ = QuadratureRule(cell, mappedqpts, Q.get_weights())
pt_dict = OrderedDict()
qpts = mappedQ.pts
qwts = mappedQ.wts
for k in range(len(qpts)):
pt_cur = tuple(qpts[k])
pt_dict[pt_cur] = [(qwts[k] * f_at_qpts[k, i, j], (i, j))
for (i, j) in index_iterator(shp)]
super().__init__(cell, shp, pt_dict, {}, nm)
class IntegralLegendreNormalNormalMoment(IntegralLegendreBidirectionalMoment):
"""Moment of dot(n, dot(tau, n)) against Legendre on entity."""
def __init__(self, cell, entity, mom_deg, comp_deg):
n = cell.compute_normal(entity)
super().__init__(cell, n, n, entity, mom_deg, comp_deg,
"IntegralNormalNormalLegendreMoment")
class IntegralLegendreNormalTangentialMoment(IntegralLegendreBidirectionalMoment):
"""Moment of dot(n, dot(tau, t)) against Legendre on entity."""
def __init__(self, cell, entity, mom_deg, comp_deg):
n = cell.compute_normal(entity)
t = cell.compute_normalized_edge_tangent(entity)
super().__init__(cell, n, t, entity, mom_deg, comp_deg,
"IntegralNormalTangentialLegendreMoment")
class IntegralMomentOfDivergence(Functional):
"""Functional representing integral of the divergence of the input
against some tabulated function f."""
def __init__(self, ref_el, Q, f_at_qpts):
self.f_at_qpts = f_at_qpts
self.Q = Q
sd = ref_el.get_spatial_dimension()
qpts, qwts = Q.get_points(), Q.get_weights()
dpts = qpts
self.dpts = dpts
dpt_dict = OrderedDict()
alphas = [tuple([1 if j == i else 0 for j in range(sd)]) for i in range(sd)]
for j, pt in enumerate(dpts):
dpt_dict[tuple(pt)] = [(qwts[j]*f_at_qpts[j], alphas[i], (i,)) for i in range(sd)]
super().__init__(ref_el, tuple(), {}, dpt_dict,
"IntegralMomentOfDivergence")
class IntegralMomentOfTensorDivergence(Functional):
"""Like IntegralMomentOfDivergence, but on symmetric tensors."""
def __init__(self, ref_el, Q, f_at_qpts):
self.f_at_qpts = f_at_qpts
self.Q = Q
qpts, qwts = Q.get_points(), Q.get_weights()
nqp = len(qpts)
dpts = qpts
self.dpts = dpts
assert len(f_at_qpts.shape) == 2
assert f_at_qpts.shape[0] == 2
assert f_at_qpts.shape[1] == nqp
sd = ref_el.get_spatial_dimension()
dpt_dict = OrderedDict()
alphas = [tuple([1 if j == i else 0 for j in range(sd)]) for i in range(sd)]
for q, pt in enumerate(dpts):
dpt_dict[tuple(pt)] = [(qwts[q]*f_at_qpts[i, q], alphas[j], (i, j)) for i in range(2) for j in range(2)]
super().__init__(ref_el, tuple(), {}, dpt_dict,
"IntegralMomentOfDivergence")
class FrobeniusIntegralMoment(Functional):
def __init__(self, ref_el, Q, f_at_qpts):
# f_at_qpts is (some shape) x num_qpts
shp = tuple(f_at_qpts.shape[:-1])
if len(Q.get_points()) != f_at_qpts.shape[-1]:
raise Exception("Mismatch in number of quadrature points and values")
qpts, qwts = Q.get_points(), Q.get_weights()
pt_dict = {}
for i, (pt_cur, wt_cur) in enumerate(zip(map(tuple, qpts), qwts)):
pt_dict[pt_cur] = []
for alfa in index_iterator(shp):
qpidx = tuple(alfa + [i])
pt_dict[pt_cur].append((wt_cur * f_at_qpts[qpidx], tuple(alfa)))
super().__init__(ref_el, shp, pt_dict, {}, "FrobeniusIntegralMoment")
class PointNormalEvaluation(Functional):
"""Implements the evaluation of the normal component of a vector at a
point on a facet of codimension 1."""
def __init__(self, ref_el, facet_no, pt):
n = ref_el.compute_normal(facet_no)
self.n = n
sd = ref_el.get_spatial_dimension()
pt_dict = {pt: [(n[i], (i,)) for i in range(sd)]}
shp = (sd,)
super().__init__(ref_el, shp, pt_dict, {}, "PointNormalEval")
class PointEdgeTangentEvaluation(Functional):
"""Implements the evaluation of the tangential component of a
vector at a point on a facet of dimension 1."""
def __init__(self, ref_el, edge_no, pt):
t = ref_el.compute_edge_tangent(edge_no)
self.t = t
sd = ref_el.get_spatial_dimension()
pt_dict = {pt: [(t[i], (i,)) for i in range(sd)]}
shp = (sd,)
super().__init__(ref_el, shp, pt_dict, {}, "PointEdgeTangent")
def tostr(self):
x = list(map(str, list(self.pt_dict.keys())[0]))
return "(u.t)(%s)" % (','.join(x),)
class IntegralMomentOfEdgeTangentEvaluation(Functional):
r"""
\int_e v\cdot t p ds
p \in Polynomials
:arg ref_el: reference element for which e is a dim-1 entity
:arg Q: quadrature rule on the face
:arg P_at_qpts: polynomials evaluated at quad points
:arg edge: which edge.
"""
def __init__(self, ref_el, Q, P_at_qpts, edge):
t = ref_el.compute_edge_tangent(edge)
sd = ref_el.get_spatial_dimension()
transform = ref_el.get_entity_transform(1, edge)
pts = tuple(map(lambda p: tuple(transform(p)), Q.get_points()))
weights = Q.get_weights()
pt_dict = OrderedDict()
for pt, wgt, phi in zip(pts, weights, P_at_qpts):
pt_dict[pt] = [(wgt*phi*t[i], (i, )) for i in range(sd)]
super().__init__(ref_el, (sd, ), pt_dict, {},
"IntegralMomentOfEdgeTangentEvaluation")
class PointFaceTangentEvaluation(Functional):
"""Implements the evaluation of a tangential component of a
vector at a point on a facet of codimension 1."""
def __init__(self, ref_el, face_no, tno, pt):
t = ref_el.compute_face_tangents(face_no)[tno]
self.t = t
self.tno = tno
sd = ref_el.get_spatial_dimension()
pt_dict = {pt: [(t[i], (i,)) for i in range(sd)]}
shp = (sd,)
Functional.__init__(self, ref_el, shp, pt_dict, {}, "PointFaceTangent")
def tostr(self):
x = list(map(str, list(self.pt_dict.keys())[0]))
return "(u.t%d)(%s)" % (self.tno, ','.join(x),)
class IntegralMomentOfFaceTangentEvaluation(Functional):
r"""
\int_F v \times n \cdot p ds
p \in Polynomials
:arg ref_el: reference element for which F is a codim-1 entity
:arg Q: quadrature rule on the face
:arg P_at_qpts: polynomials evaluated at quad points
:arg facet: which facet.
"""
def __init__(self, ref_el, Q, P_at_qpts, facet):
P_at_qpts = [[P_at_qpts[0][i], P_at_qpts[1][i], P_at_qpts[2][i]]
for i in range(P_at_qpts.shape[1])]
n = ref_el.compute_scaled_normal(facet)
sd = ref_el.get_spatial_dimension()
transform = ref_el.get_entity_transform(sd-1, facet)
pts = tuple(map(lambda p: tuple(transform(p)), Q.get_points()))
weights = Q.get_weights()
pt_dict = OrderedDict()
for pt, wgt, phi in zip(pts, weights, P_at_qpts):
phixn = [phi[1]*n[2] - phi[2]*n[1],
phi[2]*n[0] - phi[0]*n[2],
phi[0]*n[1] - phi[1]*n[0]]
pt_dict[pt] = [(wgt*(-n[2]*phixn[1]+n[1]*phixn[2]), (0, )),
(wgt*(n[2]*phixn[0]-n[0]*phixn[2]), (1, )),
(wgt*(-n[1]*phixn[0]+n[0]*phixn[1]), (2, ))]
super().__init__(ref_el, (sd, ), pt_dict, {},
"IntegralMomentOfFaceTangentEvaluation")
class MonkIntegralMoment(Functional):
r"""
face nodes are \int_F v\cdot p dA where p \in P_{q-2}(f)^3 with p \cdot n = 0
(cf. Peter Monk, Finite Element Methods for Maxwell's Equations, p. 129)
Note that we don't scale by the area of the facet
:arg ref_el: reference element for which F is a codim-1 entity
:arg Q: quadrature rule on the face
:arg P_at_qpts: polynomials evaluated at quad points
:arg facet: which facet.
"""
def __init__(self, ref_el, Q, P_at_qpts, facet):
sd = ref_el.get_spatial_dimension()
weights = Q.get_weights()
pt_dict = OrderedDict()
transform = ref_el.get_entity_transform(sd-1, facet)
pts = tuple(map(lambda p: tuple(transform(p)), Q.get_points()))
for pt, wgt, phi in zip(pts, weights, P_at_qpts):
pt_dict[pt] = [(wgt*phi[i], (i, )) for i in range(sd)]
super().__init__(ref_el, (sd, ), pt_dict, {}, "MonkIntegralMoment")
class PointScaledNormalEvaluation(Functional):
"""Implements the evaluation of the normal component of a vector at a
point on a facet of codimension 1, where the normal is scaled by
the volume of that facet."""
def __init__(self, ref_el, facet_no, pt):
self.n = ref_el.compute_scaled_normal(facet_no)
sd = ref_el.get_spatial_dimension()
shp = (sd,)
pt_dict = {pt: [(self.n[i], (i,)) for i in range(sd)]}
super().__init__(ref_el, shp, pt_dict, {}, "PointScaledNormalEval")
def tostr(self):
x = list(map(str, list(self.pt_dict.keys())[0]))
return "(u.n)(%s)" % (','.join(x),)
class IntegralMomentOfScaledNormalEvaluation(Functional):
r"""
\int_F v\cdot n p ds
p \in Polynomials
:arg ref_el: reference element for which F is a codim-1 entity
:arg Q: quadrature rule on the face
:arg P_at_qpts: polynomials evaluated at quad points
:arg facet: which facet.
"""
def __init__(self, ref_el, Q, P_at_qpts, facet):
n = ref_el.compute_scaled_normal(facet)
sd = ref_el.get_spatial_dimension()
transform = ref_el.get_entity_transform(sd - 1, facet)
pts = tuple(map(lambda p: tuple(transform(p)), Q.get_points()))
weights = Q.get_weights()
pt_dict = OrderedDict()
for pt, wgt, phi in zip(pts, weights, P_at_qpts):
pt_dict[pt] = [(wgt*phi*n[i], (i, )) for i in range(sd)]
super().__init__(ref_el, (sd, ), pt_dict, {}, "IntegralMomentOfScaledNormalEvaluation")
class PointwiseInnerProductEvaluation(Functional):
"""
This is a functional on symmetric 2-tensor fields. Let u be such a
field, p be a point, and v,w be vectors. This implements the evaluation
v^T u(p) w.
Clearly v^iu_{ij}w^j = u_{ij}v^iw^j. Thus the value can be computed
from the Frobenius inner product of u with wv^T. This gives the
correct weights.
"""
def __init__(self, ref_el, v, w, p):
sd = ref_el.get_spatial_dimension()
wvT = numpy.outer(w, v)
pt_dict = {p: [(wvT[i][j], (i, j))
for i, j in index_iterator((sd, sd))]}
shp = (sd, sd)
super().__init__(ref_el, shp, pt_dict, {}, "PointwiseInnerProductEval")
class TensorBidirectionalMomentInnerProductEvaluation(Functional):
r"""
This is a functional on symmetric 2-tensor fields. Let u be such a
field, f a function tabulated at quadrature points, and v, w vectors. This implements the evaluation
\int v^T u(x) w f(x) dx.
Clearly v^iu_{ij}w^j = u_{ij}v^iw^j. Thus the value can be computed
from the Frobenius inner product of u with wv^T. This gives the
correct weights.
"""
def __init__(self, ref_el, v, w, Q, f_at_qpts, comp_deg):
sd = ref_el.get_spatial_dimension()
wvT = numpy.outer(w, v)
qpts, qwts = Q.get_points(), Q.get_weights()
pt_dict = {}
for k, pt in enumerate(map(tuple, qpts)):
    pt_dict[pt] = []
    for i, j in index_iterator((sd, sd)):
        pt_dict[pt].append((qwts[k] * wvT[i][j] * f_at_qpts[i, j, k],
                            (i, j)))
shp = (sd, sd)
super().__init__(ref_el, shp, pt_dict, {}, "TensorBidirectionalMomentInnerProductEvaluation")
class IntegralMomentOfNormalEvaluation(Functional):
r"""
\int_F v\cdot n p ds
p \in Polynomials
:arg ref_el: reference element for which F is a codim-1 entity
:arg Q: quadrature rule on the face
:arg P_at_qpts: polynomials evaluated at quad points
:arg facet: which facet.
"""
def __init__(self, ref_el, Q, P_at_qpts, facet):
# scaling on the normal is ok because edge length then weights
# the reference element quadrature appropriately
n = ref_el.compute_scaled_normal(facet)
sd = ref_el.get_spatial_dimension()
transform = ref_el.get_entity_transform(sd - 1, facet)
pts = tuple(map(lambda p: tuple(transform(p)), Q.get_points()))
weights = Q.get_weights()
pt_dict = OrderedDict()
for pt, wgt, phi in zip(pts, weights, P_at_qpts):
pt_dict[pt] = [(wgt*phi*n[i], (i, )) for i in range(sd)]
super().__init__(ref_el, (sd, ), pt_dict, {}, "IntegralMomentOfScaledNormalEvaluation")
class IntegralMomentOfTangentialEvaluation(Functional):
r"""
\int_F v\cdot t p ds
p \in Polynomials
:arg ref_el: reference element for which F is a codim-1 entity
:arg Q: quadrature rule on the face
:arg P_at_qpts: polynomials evaluated at quad points
:arg facet: which facet.
"""
def __init__(self, ref_el, Q, P_at_qpts, facet):
# scaling on the tangent is ok because edge length then weights
# the reference element quadrature appropriately
sd = ref_el.get_spatial_dimension()
assert sd == 2
t = ref_el.compute_edge_tangent(facet)
transform = ref_el.get_entity_transform(sd - 1, facet)
pts = tuple(map(lambda p: tuple(transform(p)), Q.get_points()))
weights = Q.get_weights()
pt_dict = OrderedDict()
for pt, wgt, phi in zip(pts, weights, P_at_qpts):
pt_dict[pt] = [(wgt*phi*t[i], (i, )) for i in range(sd)]
super().__init__(ref_el, (sd, ), pt_dict, {}, "IntegralMomentOfScaledTangentialEvaluation")
class IntegralMomentOfNormalNormalEvaluation(Functional):
r"""
\int_F (n^T tau n) p ds
p \in Polynomials
:arg ref_el: reference element for which F is a codim-1 entity
:arg Q: quadrature rule on the face
:arg P_at_qpts: polynomials evaluated at quad points
:arg facet: which facet.
"""
def __init__(self, ref_el, Q, P_at_qpts, facet):
# scaling on the normal is ok because edge length then weights
# the reference element quadrature appropriately
n = ref_el.compute_scaled_normal(facet)
sd = ref_el.get_spatial_dimension()
transform = ref_el.get_entity_transform(sd - 1, facet)
pts = tuple(map(lambda p: tuple(transform(p)), Q.get_points()))
weights = Q.get_weights()
pt_dict = OrderedDict()
for pt, wgt, phi in zip(pts, weights, P_at_qpts):
    pt_dict[pt] = [(wgt*phi*n[i]*n[j], (i, j))
                   for i in range(sd) for j in range(sd)]
super().__init__(ref_el, (sd, sd), pt_dict, {}, "IntegralMomentOfNormalNormalEvaluation")
fiat-2019.2.0~git20210419.7d418fa/FIAT/gauss_legendre.py 0000664 0000000 0000000 00000004307 14135323752 0021567 0 ustar 00root root 0000000 0000000 # Copyright (C) 2015 Imperial College London and others.
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
#
# Written by David A. Ham (david.ham@imperial.ac.uk), 2015
#
# Modified by Pablo D. Brubeck (brubeck@protonmail.com), 2021
import numpy
from FIAT import finite_element, polynomial_set, dual_set, functional, quadrature
from FIAT.reference_element import LINE
from FIAT.barycentric_interpolation import barycentric_interpolation
class GaussLegendreDualSet(dual_set.DualSet):
"""The dual basis for 1D discontinuous elements with nodes at the
Gauss-Legendre points."""
def __init__(self, ref_el, degree):
entity_ids = {0: {0: [], 1: []},
1: {0: list(range(0, degree+1))}}
lr = quadrature.GaussLegendreQuadratureLineRule(ref_el, degree+1)
nodes = [functional.PointEvaluation(ref_el, x) for x in lr.pts]
super(GaussLegendreDualSet, self).__init__(nodes, ref_el, entity_ids)
class GaussLegendre(finite_element.CiarletElement):
"""1D discontinuous element with nodes at the Gauss-Legendre points."""
def __init__(self, ref_el, degree):
if ref_el.shape != LINE:
raise ValueError("Gauss-Legendre elements are only defined in one dimension.")
poly_set = polynomial_set.ONPolynomialSet(ref_el, degree)
dual = GaussLegendreDualSet(ref_el, degree)
formdegree = ref_el.get_spatial_dimension() # n-form
super(GaussLegendre, self).__init__(poly_set, dual, degree, formdegree)
def tabulate(self, order, points, entity=None):
# This overrides the default with a more numerically stable algorithm
if entity is None:
entity = (self.ref_el.get_dimension(), 0)
entity_dim, entity_id = entity
transform = self.ref_el.get_entity_transform(entity_dim, entity_id)
xsrc = []
for node in self.dual.nodes:
# Assert singleton point for each node.
(pt,), = node.get_point_dict().keys()
xsrc.append(pt)
xsrc = numpy.asarray(xsrc)
xdst = numpy.array(list(map(transform, points))).flatten()
return barycentric_interpolation(xsrc, xdst, order=order)
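def _demo_gauss_legendre():  # pragma: no cover
    """A minimal sketch (not part of the original module), assuming the usual
    FIAT convention that tabulate returns a dict keyed by derivative
    multi-indices: the basis interpolates at the Gauss-Legendre nodes, so it
    forms a partition of unity at any evaluation point."""
    from FIAT.reference_element import ufc_simplex
    elem = GaussLegendre(ufc_simplex(1), 3)
    vals = elem.tabulate(0, [(0.25,), (0.75,)])[(0,)]
    return numpy.allclose(vals.sum(axis=0), 1.0)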
fiat-2019.2.0~git20210419.7d418fa/FIAT/gauss_lobatto_legendre.py 0000664 0000000 0000000 00000004331 14135323752 0023310 0 ustar 00root root 0000000 0000000 # Copyright (C) 2015 Imperial College London and others.
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
#
# Written by David A. Ham (david.ham@imperial.ac.uk), 2015
#
# Modified by Pablo D. Brubeck (brubeck@protonmail.com), 2021
import numpy
from FIAT import finite_element, polynomial_set, dual_set, functional, quadrature
from FIAT.reference_element import LINE
from FIAT.barycentric_interpolation import barycentric_interpolation
class GaussLobattoLegendreDualSet(dual_set.DualSet):
"""The dual basis for 1D continuous elements with nodes at the
Gauss-Lobatto points."""
def __init__(self, ref_el, degree):
entity_ids = {0: {0: [0], 1: [degree]},
1: {0: list(range(1, degree))}}
lr = quadrature.GaussLobattoLegendreQuadratureLineRule(ref_el, degree+1)
nodes = [functional.PointEvaluation(ref_el, x) for x in lr.pts]
super(GaussLobattoLegendreDualSet, self).__init__(nodes, ref_el, entity_ids)
class GaussLobattoLegendre(finite_element.CiarletElement):
"""1D continuous element with nodes at the Gauss-Lobatto points."""
def __init__(self, ref_el, degree):
if ref_el.shape != LINE:
raise ValueError("Gauss-Lobatto-Legendre elements are only defined in one dimension.")
poly_set = polynomial_set.ONPolynomialSet(ref_el, degree)
dual = GaussLobattoLegendreDualSet(ref_el, degree)
formdegree = 0 # 0-form
super(GaussLobattoLegendre, self).__init__(poly_set, dual, degree, formdegree)
def tabulate(self, order, points, entity=None):
# This overrides the default with a more numerically stable algorithm
if entity is None:
entity = (self.ref_el.get_dimension(), 0)
entity_dim, entity_id = entity
transform = self.ref_el.get_entity_transform(entity_dim, entity_id)
xsrc = []
for node in self.dual.nodes:
# Assert singleton point for each node.
(pt,), = node.get_point_dict().keys()
xsrc.append(pt)
xsrc = numpy.asarray(xsrc)
xdst = numpy.array(list(map(transform, points))).flatten()
return barycentric_interpolation(xsrc, xdst, order=order)
fiat-2019.2.0~git20210419.7d418fa/FIAT/gauss_radau.py 0000664 0000000 0000000 00000003006 14135323752 0021071 0 ustar 00root root 0000000 0000000 # Copyright (C) 2015 Imperial College London and others.
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
#
# Written by Robert C. Kirby (robert_kirby@baylor.edu), 2020
from FIAT import finite_element, polynomial_set, dual_set, functional, quadrature
from FIAT.reference_element import LINE
class GaussRadauDualSet(dual_set.DualSet):
"""The dual basis for 1D discontinuous elements with nodes at the
Gauss-Radau points."""
def __init__(self, ref_el, degree, right=True):
# Do DG connectivity because it's bonkers to do one-sided assembly even
# though we have an endpoint in the point set!
entity_ids = {0: {0: [], 1: []},
1: {0: list(range(0, degree+1))}}
lr = quadrature.RadauQuadratureLineRule(ref_el, degree+1, right)
nodes = [functional.PointEvaluation(ref_el, x) for x in lr.pts]
super(GaussRadauDualSet, self).__init__(nodes, ref_el, entity_ids)
class GaussRadau(finite_element.CiarletElement):
"""1D discontinuous element with nodes at the Gauss-Radau points."""
def __init__(self, ref_el, degree):
if ref_el.shape != LINE:
raise ValueError("Gauss-Radau elements are only defined in one dimension.")
poly_set = polynomial_set.ONPolynomialSet(ref_el, degree)
dual = GaussRadauDualSet(ref_el, degree)
formdegree = ref_el.get_spatial_dimension() # n-form
super(GaussRadau, self).__init__(poly_set, dual, degree, formdegree)
fiat-2019.2.0~git20210419.7d418fa/FIAT/hdiv_trace.py 0000664 0000000 0000000 00000036221 14135323752 0020710 0 ustar 00root root 0000000 0000000 # Copyright (C) 2016 Thomas H. Gibson
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
import numpy as np
from FIAT.discontinuous_lagrange import DiscontinuousLagrange
from FIAT.dual_set import DualSet
from FIAT.finite_element import FiniteElement
from FIAT.functional import PointEvaluation
from FIAT.polynomial_set import mis
from FIAT.reference_element import (ufc_simplex, POINT,
LINE, QUADRILATERAL,
TRIANGLE, TETRAHEDRON,
TENSORPRODUCT)
from FIAT.tensor_product import TensorProductElement
# Numerical tolerance for facet-entity identifications
epsilon = 1e-10
class TraceError(Exception):
"""Exception caused by tabulating a trace element on the interior of a cell,
or the gradient of a trace element."""
def __init__(self, msg):
super(TraceError, self).__init__(msg)
self.msg = msg
class HDivTrace(FiniteElement):
"""Class implementing the trace of hdiv elements. This class
is a stand-alone element family that produces a DG-facet field.
This element is what's produced after performing the trace
operation on an existing H(Div) element.
This element is also known as the discontinuous trace field that
arises in several DG formulations.
"""
def __init__(self, ref_el, degree):
"""Constructor for the HDivTrace element.
:arg ref_el: A reference element, which may be a tensor product
cell.
:arg degree: The degree of approximation. If on a tensor product
cell, then provide a tuple of degrees if you want
varying degrees.
"""
sd = ref_el.get_spatial_dimension()
if sd in (0, 1):
raise ValueError("Cannot take the trace of a %d-dim cell." % sd)
# Store the degrees if on a tensor product cell
if ref_el.get_shape() == TENSORPRODUCT:
try:
degree = tuple(degree)
except TypeError:
degree = (degree,) * len(ref_el.cells)
assert len(ref_el.cells) == len(degree), (
"Number of specified degrees must be equal to the number of cells."
)
else:
if ref_el.get_shape() not in [TRIANGLE, TETRAHEDRON, QUADRILATERAL]:
raise NotImplementedError(
"Trace element on a %s not implemented" % type(ref_el)
)
# Cannot have varying degrees for these reference cells
if isinstance(degree, tuple):
raise ValueError("Must have a tensor product cell if providing multiple degrees")
# Initialize entity dofs and construct the DG elements
# for the facets
facet_sd = sd - 1
dg_elements = {}
entity_dofs = {}
topology = ref_el.get_topology()
for top_dim, entities in topology.items():
cell = ref_el.construct_subelement(top_dim)
entity_dofs[top_dim] = {}
# We have a facet entity!
if cell.get_spatial_dimension() == facet_sd:
dg_elements[top_dim] = construct_dg_element(cell, degree)
# Initialize
for entity in entities:
entity_dofs[top_dim][entity] = []
# Compute the dof numbering for all facet entities
# and extract nodes
offset = 0
pts = []
for facet_dim in sorted(dg_elements):
element = dg_elements[facet_dim]
nf = element.space_dimension()
num_facets = len(topology[facet_dim])
for i in range(num_facets):
entity_dofs[facet_dim][i] = list(range(offset, offset + nf))
offset += nf
# Run over nodes and collect the points for point evaluations
for dof in element.dual_basis():
facet_pt, = dof.get_point_dict()
transform = ref_el.get_entity_transform(facet_dim, i)
pts.append(tuple(transform(facet_pt)))
# Setting up dual basis - only point evaluations
nodes = [PointEvaluation(ref_el, pt) for pt in pts]
dual = DualSet(nodes, ref_el, entity_dofs)
# Degree of the element
deg = max([e.degree() for e in dg_elements.values()])
super(HDivTrace, self).__init__(ref_el, dual, order=deg,
formdegree=facet_sd,
mapping="affine")
# Set up facet elements
self.dg_elements = dg_elements
# Degree for quadrature rule
self.polydegree = deg
def degree(self):
"""Return the degree of the (embedding) polynomial space."""
return self.polydegree
def get_nodal_basis(self):
"""Return the nodal basis, encoded as a PolynomialSet object,
for the finite element."""
raise NotImplementedError("get_nodal_basis not implemented for the trace element.")
def get_coeffs(self):
"""Return the expansion coefficients for the basis of the
finite element."""
raise NotImplementedError("get_coeffs not implemented for the trace element.")
def tabulate(self, order, points, entity=None):
"""Return tabulated values of derivatives up to a given order of
basis functions at given points.
:arg order: The maximum order of derivative.
:arg points: An iterable of points.
:arg entity: Optional (dimension, entity number) pair
indicating which topological entity of the
reference element to tabulate on. If ``None``,
tabulated values are computed by geometrically
approximating which facet the points are on.
.. note ::
Performing illegal tabulations on this element will result in either
a tabulation table of `numpy.nan` arrays (`entity=None` case), or
insertions of the `TraceError` exception class. This is due to the
fact that performing cell-wise tabulations, or asking for any order
of derivative evaluations, are not mathematically well-defined.
"""
sd = self.ref_el.get_spatial_dimension()
facet_sd = sd - 1
# Initializing dictionary with zeros
phivals = {}
for i in range(order + 1):
alphas = mis(sd, i)
for alpha in alphas:
phivals[alpha] = np.zeros(shape=(self.space_dimension(), len(points)))
evalkey = (0,) * sd
# If entity is None, identify facet using numerical tolerance and
# return the tabulated values
if entity is None:
# NOTE: Numerical approximation of the facet id is currently only
# implemented for simplex reference cells.
if self.ref_el.get_shape() not in [TRIANGLE, TETRAHEDRON]:
raise NotImplementedError(
"Tabulating this element on a %s cell without providing "
"an entity is not currently supported." % type(self.ref_el)
)
# Attempt to identify which facet (if any) the given points are on
vertices = self.ref_el.vertices
coordinates = barycentric_coordinates(points, vertices)
unique_facet, success = extract_unique_facet(coordinates)
# If not successful, return NaNs
if not success:
for key in phivals:
phivals[key] = np.full(shape=(self.space_dimension(), len(points)), fill_value=np.nan)
return phivals
# Otherwise, extract non-zero values and insertion indices
else:
# Map points to the reference facet
new_points = map_to_reference_facet(points, vertices, unique_facet)
# Retrieve values by tabulating the DG element
element = self.dg_elements[facet_sd]
nf = element.space_dimension()
nonzerovals, = element.tabulate(order, new_points).values()
indices = slice(nf * unique_facet, nf * (unique_facet + 1))
else:
entity_dim, _ = entity
# If the user is directly specifying cell-wise tabulation, return
# TraceErrors in dict for appropriate handling in the form compiler
if entity_dim not in self.dg_elements:
for key in phivals:
msg = "The HDivTrace element can only be tabulated on facets."
phivals[key] = TraceError(msg)
return phivals
else:
# Retrieve function evaluations (order = 0 case)
offset = 0
for facet_dim in sorted(self.dg_elements):
element = self.dg_elements[facet_dim]
nf = element.space_dimension()
num_facets = len(self.ref_el.get_topology()[facet_dim])
# Loop over the number of facets until we find a facet
# with matching dimension and id
for i in range(num_facets):
# Found it! Grab insertion indices
if (facet_dim, i) == entity:
nonzerovals, = element.tabulate(0, points).values()
indices = slice(offset, offset + nf)
offset += nf
# If asking for gradient evaluations, insert TraceError in
# gradient slots
if order > 0:
msg = "Gradients on trace elements are not well-defined."
for key in phivals:
if key != evalkey:
phivals[key] = TraceError(msg)
# Insert non-zero values in appropriate place
phivals[evalkey][indices, :] = nonzerovals
return phivals
def value_shape(self):
"""Return the value shape of the finite element functions."""
return ()
def dmats(self):
"""Return dmats: expansion coefficients for basis function
derivatives."""
raise NotImplementedError("dmats not implemented for the trace element.")
def get_num_members(self, arg):
"""Return number of members of the expansion set."""
raise NotImplementedError("get_num_members not implemented for the trace element.")
@staticmethod
def is_nodal():
return True
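def _demo_hdiv_trace_tabulate():  # pragma: no cover
    """A minimal sketch (not part of the original module): tabulating an
    HDivTrace element on a named facet entity (with points given in the
    facet's reference coordinates) fills only the dof block of that facet."""
    from FIAT.reference_element import ufc_simplex
    elem = HDivTrace(ufc_simplex(2), 1)
    vals = elem.tabulate(0, [(0.5,)], entity=(1, 0))[(0, 0)]
    # Only rows 0 and 1 (the dofs of facet 0) are nonzero.
    return vals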
def construct_dg_element(ref_el, degree):
"""Constructs a discontinuous galerkin element of a given degree
on a particular reference cell.
"""
if ref_el.get_shape() in [LINE, TRIANGLE]:
dg_element = DiscontinuousLagrange(ref_el, degree)
# Quadrilateral facets could be on a FiredrakeQuadrilateral.
# In this case, we treat this as an interval x interval cell:
elif ref_el.get_shape() == QUADRILATERAL:
dg_a = DiscontinuousLagrange(ufc_simplex(1), degree)
dg_b = DiscontinuousLagrange(ufc_simplex(1), degree)
dg_element = TensorProductElement(dg_a, dg_b)
# This handles the more general case for facets:
elif ref_el.get_shape() == TENSORPRODUCT:
assert len(degree) == len(ref_el.cells), (
"Must provide the same number of degrees as the number "
"of cells that make up the tensor product cell."
)
sub_elements = [construct_dg_element(c, d)
for c, d in zip(ref_el.cells, degree)
if c.get_shape() != POINT]
if len(sub_elements) > 1:
dg_element = TensorProductElement(*sub_elements)
else:
dg_element, = sub_elements
else:
raise NotImplementedError(
"Reference cells of type %s not currently supported" % type(ref_el)
)
return dg_element
# The following functions are credited to Marie E. Rognes:
def extract_unique_facet(coordinates, tolerance=epsilon):
"""Determines whether a set of points (described in its barycentric coordinates)
are all on one of the facet sub-entities, and return the particular facet and
whether the search has been successful.
:arg coordinates: A set of points described in barycentric coordinates.
:arg tolerance: A fixed tolerance for geometric identifications.
"""
facets = []
for c in coordinates:
on_facet = set([i for (i, l) in enumerate(c) if abs(l) < tolerance])
facets += [on_facet]
unique_facet = facets[0]
for f in facets:
unique_facet = unique_facet & f
# Handle coordinates not on facets
if len(unique_facet) != 1:
return (None, False)
# If we have a unique facet, return it and success
return (unique_facet.pop(), True)
def barycentric_coordinates(points, vertices):
"""Computes the barycentric coordinates for a set of points relative to a
simplex defined by a set of vertices.
:arg points: A set of points.
:arg vertices: A set of vertices that define the simplex.
"""
# Form mapping matrix
T = (np.asarray(vertices[:-1]) - vertices[-1]).T
invT = np.linalg.inv(T)
points = np.asarray(points)
bary = np.einsum("ij,kj->ki", invT, (points - vertices[-1]))
last = (1 - bary.sum(axis=1))
return np.concatenate([bary, last[..., np.newaxis]], axis=1)
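# Editor's note: a hedged sketch (hypothetical helper, not part of FIAT)
# combining barycentric_coordinates with extract_unique_facet. Points on the
# hypotenuse of the UFC triangle have a vanishing first barycentric
# coordinate, so they are identified as lying on facet 0.
def _demo_facet_identification():
    vertices = np.asarray(ufc_simplex(2).get_vertices())
    bary = barycentric_coordinates([(0.5, 0.5), (0.25, 0.75)], vertices)
    facet, success = extract_unique_facet(bary)
    assert success and facet == 0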
def map_from_reference_facet(point, vertices):
"""Evaluates the physical coordinate of a point using barycentric
coordinates.
:arg point: The reference points to be mapped to the facet.
:arg vertices: The vertices defining the physical element.
"""
# Compute the barycentric coordinates of the point relative to the reference facet
reference_simplex = ufc_simplex(len(vertices) - 1)
reference_vertices = reference_simplex.get_vertices()
coords = barycentric_coordinates([point, ], reference_vertices)[0]
# Evaluates the physical coordinate of the point using barycentric coordinates
point = sum(vertices[j] * coords[j] for j in range(len(coords)))
return tuple(point)
def map_to_reference_facet(points, vertices, facet):
"""Given a set of points and vertices describing a facet of a simplex in n-dimensional
coordinates (where the points lie on the facet), map the points to the reference simplex
of dimension (n-1).
:arg points: A set of points in n-D.
:arg vertices: A set of vertices describing a facet of a simplex in n-D.
:arg facet: Integer representing the facet number.
"""
# Compute the barycentric coordinates of the points with respect to the
# full physical simplex
all_coords = barycentric_coordinates(points, vertices)
# Extract vertices of the reference facet
reference_facet_simplex = ufc_simplex(len(vertices) - 2)
reference_vertices = reference_facet_simplex.get_vertices()
reference_points = []
for (i, coords) in enumerate(all_coords):
# Extract the correct subset of barycentric coordinates since we know
# which facet we are on
new_coords = [coords[j] for j in range(len(coords)) if j != facet]
# Evaluate the reference coordinate of a point in barycentric coordinates
reference_pt = sum(np.asarray(reference_vertices[j]) * new_coords[j]
for j in range(len(new_coords)))
reference_points += [reference_pt]
return reference_points
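# Editor's note: a round-trip sketch (hypothetical helper, not part of FIAT)
# for the two mapping utilities above: a reference facet point is pushed to
# physical coordinates and then pulled back again.
def _demo_facet_mapping_round_trip():
    T = ufc_simplex(2)
    verts = T.get_vertices()
    facet_verts = np.asarray([verts[i] for i in T.get_topology()[1][0]])
    midpoint = map_from_reference_facet((0.5,), facet_verts)
    back, = map_to_reference_facet([midpoint], verts, 0)
    assert np.allclose(back, (0.5,))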
fiat-2019.2.0~git20210419.7d418fa/FIAT/hdivcurl.py 0000664 0000000 0000000 00000030671 14135323752 0020423 0 ustar 00root root 0000000 0000000 # Copyright (C) 2013 Andrew T. T. McRae (Imperial College London)
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
import numpy
import types
from FIAT.tensor_product import TensorProductElement
from FIAT import functional
def Hdiv(element):
if not isinstance(element, TensorProductElement):
raise NotImplementedError
if element.A.get_formdegree() is None or element.B.get_formdegree() is None:
raise ValueError("form degree of sub-element was None (not set during initialisation), Hdiv cannot be done without this information")
formdegree = element.A.get_formdegree() + element.B.get_formdegree()
if formdegree != element.get_reference_element().get_spatial_dimension() - 1:
raise ValueError("Tried to use Hdiv on a non-(n-1)-form element")
newelement = TensorProductElement(element.A, element.B) # make a copy to return
# redefine value_shape()
def value_shape(self):
"Return the value shape of the finite element functions."
return (self.get_reference_element().get_spatial_dimension(),)
newelement.value_shape = types.MethodType(value_shape, newelement)
# store old _mapping
newelement._oldmapping = newelement._mapping
# redefine _mapping
newelement._mapping = "contravariant piola"
# store formdegree
newelement.formdegree = formdegree
# redefine tabulate
newelement.old_tabulate = newelement.tabulate
def tabulate(self, order, points, entity=None):
"""Return tabulated values of derivatives up to given order of
basis functions at given points."""
# don't duplicate what the old function does fine...
old_result = self.old_tabulate(order, points, entity)
new_result = {}
sd = self.get_reference_element().get_spatial_dimension()
for alpha in old_result.keys():
temp_old = old_result[alpha]
if self._oldmapping == "affine":
temp = numpy.zeros((temp_old.shape[0], sd, temp_old.shape[1]), dtype=temp_old.dtype)
# both constituents affine, i.e., they were 0 forms or n-forms.
# to sum to n-1, we must have "0-form on an interval" crossed
# with something discontinuous.
# look for the (continuous) 0-form, and put the value there
if self.A.get_formdegree() == 0:
# first element, so (-x, 0, ...)
# Sign flip to ensure that a positive value of the node
# means a vector field having a direction "to the left"
# relative to direction in which the nodes are placed on an
# edge in case of higher-order schemes.
# This is required for unstructured quadrilateral meshes.
temp[:, 0, :] = -temp_old[:, :]
elif self.B.get_formdegree() == 0:
# second element, so (..., 0, x)
temp[:, -1, :] = temp_old[:, :]
else:
raise Exception("Hdiv affine/affine form degrees broke")
elif self._oldmapping == "contravariant piola":
temp = numpy.zeros((temp_old.shape[0], sd, temp_old.shape[2]), dtype=temp_old.dtype)
Asd = self.A.get_reference_element().get_spatial_dimension()
# one component is affine, one is contravariant piola
# the affine one must be an n-form, hence discontinuous
# this component/these components get zeroed out
if element.A.mapping()[0] == "contravariant piola":
# first element, so (x1, ..., xn, 0, ...)
temp[:, :Asd, :] = temp_old[:, :, :]
elif element.B.mapping()[0] == "contravariant piola":
# second element, so (..., 0, x1, ..., xn)
temp[:, Asd:, :] = temp_old[:, :, :]
else:
raise ValueError("Hdiv contravariant piola couldn't find an existing ConPi subelement")
elif self._oldmapping == "covariant piola":
temp = numpy.zeros((temp_old.shape[0], sd, temp_old.shape[2]), dtype=temp_old.dtype)
# one component is affine, one is covariant piola
# the affine one must be an n-form, hence discontinuous
# this component/these components get zeroed out
# the remaining part gets perped
if element.A.mapping()[0] == "covariant piola":
Asd = self.A.get_reference_element().get_spatial_dimension()
if not Asd == 2:
raise ValueError("Must be 2d shape to automatically convert covariant to contravariant")
temp_perp = numpy.zeros(temp_old.shape, dtype=temp_old.dtype)
# first element, so (x2, -x1, 0, ...)
temp_perp[:, 0, :] = temp_old[:, 1, :]
temp_perp[:, 1, :] = -temp_old[:, 0, :]
temp[:, :Asd, :] = temp_perp[:, :, :]
elif element.B.mapping()[0] == "covariant piola":
Bsd = self.B.get_reference_element().get_spatial_dimension()
if not Bsd == 2:
raise ValueError("Must be 2d shape to automatically convert covariant to contravariant")
temp_perp = numpy.zeros(temp_old.shape, dtype=temp_old.dtype)
# second element, so (..., 0, x2, -x1)
temp_perp[:, 0, :] = temp_old[:, 1, :]
temp_perp[:, 1, :] = -temp_old[:, 0, :]
# insert the perped values into the second factor's slots
temp[:, sd - Bsd:, :] = temp_perp[:, :, :]
else:
raise ValueError("Hdiv covariant piola couldn't find an existing CovPi subelement")
new_result[alpha] = temp
return new_result
newelement.tabulate = types.MethodType(tabulate, newelement)
# splat any PointEvaluation functionals.
# they become a nasty mix of internal and external component DOFs
if newelement._oldmapping == "affine":
oldnodes = newelement.dual.nodes
newnodes = []
for node in oldnodes:
if isinstance(node, functional.PointEvaluation):
newnodes.append(functional.Functional(None, None, None, {}, "Undefined"))
else:
newnodes.append(node)
newelement.dual.nodes = newnodes
return newelement
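# Editor's note: a usage sketch, not part of the FIAT source. The helper name
# is hypothetical; it builds the lowest-order H(div) element on a
# quadrilateral as the product of a continuous 0-form and a discontinuous
# 1-form on intervals.
def _demo_hdiv_usage():
    from FIAT.reference_element import ufc_simplex
    from FIAT.lagrange import Lagrange
    from FIAT.discontinuous_lagrange import DiscontinuousLagrange
    interval = ufc_simplex(1)
    cg = Lagrange(interval, 1)               # 0-form factor
    dg = DiscontinuousLagrange(interval, 0)  # 1-form factor
    element = Hdiv(TensorProductElement(cg, dg))
    assert element.formdegree == 1
    assert element.value_shape() == (2,)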
def Hcurl(element):
if not isinstance(element, TensorProductElement):
raise NotImplementedError
if element.A.get_formdegree() is None or element.B.get_formdegree() is None:
raise ValueError("form degree of sub-element was None (not set during initialisation), Hcurl cannot be done without this information")
formdegree = element.A.get_formdegree() + element.B.get_formdegree()
if not (formdegree == 1):
raise ValueError("Tried to use Hcurl on a non-1-form element")
newelement = TensorProductElement(element.A, element.B) # make a copy to return
# redefine value_shape()
def value_shape(self):
"Return the value shape of the finite element functions."
return (self.get_reference_element().get_spatial_dimension(),)
newelement.value_shape = types.MethodType(value_shape, newelement)
# store old _mapping
newelement._oldmapping = newelement._mapping
# redefine _mapping
newelement._mapping = "covariant piola"
# store formdegree
newelement.formdegree = formdegree
# redefine tabulate
newelement.old_tabulate = newelement.tabulate
def tabulate(self, order, points, entity=None):
"""Return tabulated values of derivatives up to given order of
basis functions at given points."""
# don't duplicate what the old function does fine...
old_result = self.old_tabulate(order, points, entity)
new_result = {}
sd = self.get_reference_element().get_spatial_dimension()
for alpha in old_result.keys():
temp_old = old_result[alpha]
if self._oldmapping == "affine":
temp = numpy.zeros((temp_old.shape[0], sd, temp_old.shape[1]), dtype=temp_old.dtype)
# both constituents affine, i.e., they were 0 forms or n-forms.
# to sum to 1, we must have "1-form on an interval" crossed with
# a bunch of 0-forms (continuous).
# look for the (discontinuous) 1-form, and put the value in its slot
if self.A.get_formdegree() == 1:
# first element, so (x, 0, ...)
# No sign flip here, nor at the other branch, to ensure that
# a positive value of the node means a vector field having
# the same direction as the direction in which the nodes are
# placed on an edge in case of higher-order schemes.
# This is required for unstructured quadrilateral meshes.
temp[:, 0, :] = temp_old[:, :]
elif self.B.get_formdegree() == 1:
# second element, so (..., 0, x)
temp[:, -1, :] = temp_old[:, :]
else:
raise Exception("Hcurl affine/affine form degrees broke")
elif self._oldmapping == "covariant piola":
temp = numpy.zeros((temp_old.shape[0], sd, temp_old.shape[2]), dtype=temp_old.dtype)
Asd = self.A.get_reference_element().get_spatial_dimension()
# one component is affine, one is covariant piola
# the affine one must be an 0-form, hence continuous
# this component/these components get zeroed out
if element.A.mapping()[0] == "covariant piola":
# first element, so (x1, ..., xn, 0, ...)
temp[:, :Asd, :] = temp_old[:, :, :]
elif element.B.mapping()[0] == "covariant piola":
# second element, so (..., 0, x1, ..., xn)
temp[:, Asd:, :] = temp_old[:, :, :]
else:
raise ValueError("Hdiv contravariant piola couldn't find an existing ConPi subelement")
elif self._oldmapping == "contravariant piola":
temp = numpy.zeros((temp_old.shape[0], sd, temp_old.shape[2]), dtype=temp_old.dtype)
# one component is affine, one is contravariant piola
# the affine one must be an 0-form, hence continuous
# this component/these components get zeroed out
# the remaining part gets perped
if element.A.mapping()[0] == "contravariant piola":
Asd = self.A.get_reference_element().get_spatial_dimension()
if not Asd == 2:
raise ValueError("Must be 2d shape to automatically convert contravariant to covariant")
temp_perp = numpy.zeros(temp_old.shape, dtype=temp_old.dtype)
# first element, so (-x2, x1, 0, ...)
temp_perp[:, 0, :] = -temp_old[:, 1, :]
temp_perp[:, 1, :] = temp_old[:, 0, :]
temp[:, :Asd, :] = temp_perp[:, :, :]
elif element.B.mapping()[0] == "contravariant piola":
Bsd = self.B.get_reference_element().get_spatial_dimension()
if not Bsd == 2:
raise ValueError("Must be 2d shape to automatically convert contravariant to covariant")
temp_perp = numpy.zeros(temp_old.shape, dtype=temp_old.dtype)
# second element, so (..., 0, -x2, x1)
temp_perp[:, 0, :] = -temp_old[:, 1, :]
temp_perp[:, 1, :] = temp_old[:, 0, :]
# insert the perped values into the second factor's slots
temp[:, sd - Bsd:, :] = temp_perp[:, :, :]
else:
raise ValueError("Hcurl contravariant piola couldn't find an existing CovPi subelement")
new_result[alpha] = temp
return new_result
newelement.tabulate = types.MethodType(tabulate, newelement)
# splat any PointEvaluation functionals.
# they become a nasty mix of internal and external component DOFs
if newelement._oldmapping == "affine":
oldnodes = newelement.dual.nodes
newnodes = []
for node in oldnodes:
if isinstance(node, functional.PointEvaluation):
newnodes.append(functional.Functional(None, None, None, {}, "Undefined"))
else:
newnodes.append(node)
newelement.dual.nodes = newnodes
return newelement
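# Editor's note: the mirror-image sketch for Hcurl (hypothetical helper, not
# part of FIAT); the same 0-form/1-form product is a valid 1-form element, so
# it can be wrapped either way.
def _demo_hcurl_usage():
    from FIAT.reference_element import ufc_simplex
    from FIAT.lagrange import Lagrange
    from FIAT.discontinuous_lagrange import DiscontinuousLagrange
    interval = ufc_simplex(1)
    element = Hcurl(TensorProductElement(Lagrange(interval, 1),
                                         DiscontinuousLagrange(interval, 0)))
    assert element.formdegree == 1 and element.value_shape() == (2,)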
fiat-2019.2.0~git20210419.7d418fa/FIAT/hellan_herrmann_johnson.py 0000664 0000000 0000000 00000007601 14135323752 0023473 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
"""Implementation of the Hellan-Herrmann-Johnson finite elements."""
# Copyright (C) 2016-2018 Lizao Li
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from FIAT.finite_element import CiarletElement
from FIAT.dual_set import DualSet
from FIAT.polynomial_set import ONSymTensorPolynomialSet
from FIAT.functional import PointwiseInnerProductEvaluation as InnerProduct
import numpy
class HellanHerrmannJohnsonDual(DualSet):
"""Degrees of freedom for Hellan-Herrmann-Johnson elements."""
def __init__(self, cell, degree):
dim = cell.get_spatial_dimension()
if not dim == 2:
raise ValueError("Hellan_Herrmann-Johnson elements are only"
"defined in dimension 2.")
# construct the degrees of freedoms
dofs = [] # list of functionals
# dof_ids[i][j] contains the indices of dofs that are associated with
# entity j in dim i
dof_ids = {}
# no vertex dof
dof_ids[0] = {i: [] for i in range(dim + 1)}
# edge dofs
(_dofs, _dof_ids) = self._generate_edge_dofs(cell, degree, 0)
dofs.extend(_dofs)
dof_ids[1] = _dof_ids
# cell dofs
(_dofs, _dof_ids) = self._generate_trig_dofs(cell, degree, len(dofs))
dofs.extend(_dofs)
dof_ids[dim] = _dof_ids
super(HellanHerrmannJohnsonDual, self).__init__(dofs, cell, dof_ids)
@staticmethod
def _generate_edge_dofs(cell, degree, offset):
"""Generate dofs on edges.
On each edge, let n be its normal. For degree=r, the scalar function
n^T u n
is evaluated at enough points to control P(r).
"""
dofs = []
dof_ids = {}
for entity_id in range(3): # a triangle has 3 edges
pts = cell.make_points(1, entity_id, degree + 2) # edges are 1D
normal = cell.compute_scaled_normal(entity_id)
dofs += [InnerProduct(cell, normal, normal, pt) for pt in pts]
num_new_dofs = len(pts) # 1 dof per point on edge
dof_ids[entity_id] = list(range(offset, offset + num_new_dofs))
offset += num_new_dofs
return (dofs, dof_ids)
@staticmethod
def _generate_trig_dofs(cell, degree, offset):
"""Generate dofs on edges.
On each triangle, for degree=r, the three components
u11, u12, u22
are evaluated at enough points to control P(r-1).
"""
dofs = []
dof_ids = {}
pts = cell.make_points(2, 0, degree + 2) # 2D trig #0
e1 = numpy.array([1.0, 0.0]) # euclidean basis 1
e2 = numpy.array([0.0, 1.0]) # euclidean basis 2
basis = [(e1, e1), (e1, e2), (e2, e2)] # basis for symmetric matrix
for (v1, v2) in basis:
dofs += [InnerProduct(cell, v1, v2, pt) for pt in pts]
num_dofs = 3 * len(pts) # 3 dofs per trig
dof_ids[0] = list(range(offset, offset + num_dofs))
return (dofs, dof_ids)
class HellanHerrmannJohnson(CiarletElement):
"""The definition of Hellan-Herrmann-Johnson element. It is defined only in
dimension 2. It consists of piecewise polynomial symmetric-matrix-valued
functions of degree r or less with normal-normal continuity.
"""
def __init__(self, cell, degree):
assert degree >= 0, "Hellan-Herrmann-Johnson starts at degree 0!"
# shape functions
Ps = ONSymTensorPolynomialSet(cell, degree)
# degrees of freedom
Ls = HellanHerrmannJohnsonDual(cell, degree)
# mapping under affine transformation
mapping = "double contravariant piola"
super(HellanHerrmannJohnson, self).__init__(Ps, Ls, degree,
mapping=mapping)
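# Editor's note: a dimension-count sketch (hypothetical helper, not part of
# FIAT). For degree r the element has 3 * dim P_r(triangle) members; for
# r = 1 that is 2 edge evaluations per edge plus 3 interior moments, 9 dofs.
def _demo_hhj():
    from FIAT.reference_element import ufc_simplex
    element = HellanHerrmannJohnson(ufc_simplex(2), 1)
    assert element.space_dimension() == 9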
fiat-2019.2.0~git20210419.7d418fa/FIAT/hermite.py 0000664 0000000 0000000 00000004603 14135323752 0020234 0 ustar 00root root 0000000 0000000 # Copyright (C) 2008 Robert C. Kirby (Texas Tech University)
# Modified 2017 by RCK
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from FIAT import finite_element, polynomial_set, dual_set, functional
class CubicHermiteDualSet(dual_set.DualSet):
"""The dual basis for Lagrange elements. This class works for
simplices of any dimension. Nodes are point evaluation at
equispaced points."""
def __init__(self, ref_el):
entity_ids = {}
nodes = []
cur = 0
# make nodes by getting points
# need to do this dimension-by-dimension, facet-by-facet
top = ref_el.get_topology()
verts = ref_el.get_vertices()
sd = ref_el.get_spatial_dimension()
# get jet at each vertex
entity_ids[0] = {}
for v in sorted(top[0]):
nodes.append(functional.PointEvaluation(ref_el, verts[v]))
pd = functional.PointDerivative
for i in range(sd):
alpha = [0] * sd
alpha[i] = 1
nodes.append(pd(ref_el, verts[v], alpha))
entity_ids[0][v] = list(range(cur, cur + 1 + sd))
cur += sd + 1
# now only have dofs at the barycenter, which is the
# maximal dimension
# no edge dof
entity_ids[1] = {}
for i in top[1]:
entity_ids[1][i] = []
if sd > 1:
# face dof
# point evaluation at barycenter
entity_ids[2] = {}
for f in sorted(top[2]):
pt = ref_el.make_points(2, f, 3)[0]
n = functional.PointEvaluation(ref_el, pt)
nodes.append(n)
entity_ids[2][f] = list(range(cur, cur + 1))
cur += 1
for dim in range(3, sd + 1):
entity_ids[dim] = {}
for facet in top[dim]:
entity_ids[dim][facet] = []
super(CubicHermiteDualSet, self).__init__(nodes, ref_el, entity_ids)
class CubicHermite(finite_element.CiarletElement):
"""The cubic Hermite finite element. It is what it is."""
def __init__(self, ref_el, deg=3):
assert deg == 3
poly_set = polynomial_set.ONPolynomialSet(ref_el, 3)
dual = CubicHermiteDualSet(ref_el)
super(CubicHermite, self).__init__(poly_set, dual, 3)
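# Editor's note: a sanity-check sketch (hypothetical helper, not part of
# FIAT). On a triangle the cubic Hermite element has 3 x (value + 2
# derivatives) vertex dofs plus one barycenter value, matching dim P3 = 10.
def _demo_cubic_hermite():
    from FIAT.reference_element import ufc_simplex
    element = CubicHermite(ufc_simplex(2))
    assert element.space_dimension() == 10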
fiat-2019.2.0~git20210419.7d418fa/FIAT/jacobi.py 0000664 0000000 0000000 00000006603 14135323752 0020030 0 ustar 00root root 0000000 0000000 # Copyright (C) 2008 Robert C. Kirby (Texas Tech University)
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
"""Several functions related to the one-dimensional jacobi polynomials:
Evaluation, evaluation of derivatives, plus computation of the roots
via Newton's method. These mainly are used in defining the expansion
functions over the simplices and in defining quadrature
rules over each domain."""
import numpy
def eval_jacobi(a, b, n, x):
"""Evaluates the nth jacobi polynomial with weight parameters a,b at a
point x. Recurrence relations implemented from the pseudocode
given in Karniadakis and Sherwin, Appendix B"""
if 0 == n:
return 1.0
elif 1 == n:
return 0.5 * (a - b + (a + b + 2.0) * x)
else: # 2 <= n
apb = a + b
pn2 = 1.0
pn1 = 0.5 * (a - b + (apb + 2.0) * x)
p = 0
for k in range(2, n + 1):
a1 = 2.0 * k * (k + apb) * (2.0 * k + apb - 2.0)
a2 = (2.0 * k + apb - 1.0) * (a * a - b * b)
a3 = (2.0 * k + apb - 2.0) \
* (2.0 * k + apb - 1.0) \
* (2.0 * k + apb)
a4 = 2.0 * (k + a - 1.0) * (k + b - 1.0) \
* (2.0 * k + apb)
a2 = a2 / a1
a3 = a3 / a1
a4 = a4 / a1
p = (a2 + a3 * x) * pn1 - a4 * pn2
pn2 = pn1
pn1 = p
return p
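# Editor's note: a quick check, not part of the FIAT source. With a = b = 0
# the Jacobi polynomials reduce to Legendre polynomials, e.g.
# P_2(x) = (3x^2 - 1)/2.
def _demo_eval_jacobi():
    x = 0.5
    assert abs(eval_jacobi(0, 0, 2, x) - 0.5 * (3 * x**2 - 1)) < 1e-14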
def eval_jacobi_batch(a, b, n, xs):
"""Evaluates all jacobi polynomials with weights a,b
up to degree n. xs is a numpy.array of points.
Returns a two-dimensional array of points, where the
rows correspond to the Jacobi polynomials and the
columns correspond to the points."""
result = numpy.zeros((n + 1, len(xs)), xs.dtype)
# hack to make sure AD type is propagated through
for ii in range(result.shape[1]):
result[0, ii] = 1.0 + xs[ii, 0] - xs[ii, 0]
xsnew = xs.reshape((-1,))
if n > 0:
result[1, :] = 0.5 * (a - b + (a + b + 2.0) * xsnew)
apb = a + b
for k in range(2, n + 1):
a1 = 2.0 * k * (k + apb) * (2.0 * k + apb - 2.0)
a2 = (2.0 * k + apb - 1.0) * (a * a - b * b)
a3 = (2.0 * k + apb - 2.0) \
* (2.0 * k + apb - 1.0) \
* (2.0 * k + apb)
a4 = 2.0 * (k + a - 1.0) * (k + b - 1.0) \
* (2.0 * k + apb)
a2 = a2 / a1
a3 = a3 / a1
a4 = a4 / a1
result[k, :] = (a2 + a3 * xsnew) * result[k-1, :] \
- a4 * result[k-2, :]
return result
def eval_jacobi_deriv(a, b, n, x):
"""Evaluates the first derivative of P_{n}^{a,b} at a point x."""
if n == 0:
return 0.0
else:
return 0.5 * (a + b + n + 1) * eval_jacobi(a + 1, b + 1, n - 1, x)
def eval_jacobi_deriv_batch(a, b, n, xs):
"""Evaluates the first derivatives of all jacobi polynomials with
weights a,b up to degree n. xs is a numpy.array of points.
Returns a two-dimensional array of points, where the
rows correspond to the Jacobi polynomials and the
columns correspond to the points."""
results = numpy.zeros((n + 1, len(xs)), "d")
if n == 0:
return results
else:
results[1:, :] = eval_jacobi_batch(a + 1, b + 1, n - 1, xs)
for j in range(1, n + 1):
results[j, :] *= 0.5 * (a + b + j + 1)
return results
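# Editor's note: a consistency sketch (hypothetical helper, not part of
# FIAT): the batch evaluator agrees with the pointwise one. Note that the
# batch routines expect points as a 2D array of shape (npoints, 1).
def _demo_jacobi_batch():
    xs = numpy.array([[-0.5], [0.0], [0.5]])
    table = eval_jacobi_batch(0, 0, 3, xs)
    assert table.shape == (4, 3)
    assert abs(table[2, 2] - eval_jacobi(0, 0, 2, 0.5)) < 1e-14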
fiat-2019.2.0~git20210419.7d418fa/FIAT/kong_mulder_veldhuizen.py 0000664 0000000 0000000 00000014201 14135323752 0023335 0 ustar 00root root 0000000 0000000 # Copyright (C) 2020 Robert C. Kirby (Baylor University)
#
# contributions by Keith Roberts (University of São Paulo)
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from FIAT import (
finite_element,
dual_set,
functional,
Bubble,
FacetBubble,
Lagrange,
NodalEnrichedElement,
RestrictedElement,
reference_element,
)
from FIAT.quadrature_schemes import create_quadrature
TRIANGLE = reference_element.UFCTriangle()
TETRAHEDRON = reference_element.UFCTetrahedron()
def _get_entity_ids(ref_el, degree):
"""The topological association in a dictionary"""
T = ref_el.topology
sd = ref_el.get_spatial_dimension()
if degree == 1: # works for any spatial dimension.
entity_ids = {0: dict((i, [i]) for i in range(len(T[0])))}
for d in range(1, sd + 1):
entity_ids[d] = dict((i, []) for i in range(len(T[d])))
elif degree == 2:
if sd == 2:
entity_ids = {
0: dict((i, [i]) for i in range(3)),
1: dict((i, [i + 3]) for i in range(3)),
2: {0: [6]},
}
elif sd == 3:
entity_ids = {
0: dict((i, [i]) for i in range(4)),
1: dict((i, [i + 4]) for i in range(6)),
2: dict((i, [i + 10]) for i in range(4)),
3: {0: [14]},
}
elif degree == 3:
if sd == 2:
etop = [[3, 4], [6, 5], [7, 8]]
entity_ids = {
0: dict((i, [i]) for i in range(3)),
1: dict((i, etop[i]) for i in range(3)),
2: {0: [9, 10, 11]},
}
elif sd == 3:
etop = [[4, 5], [7, 6], [8, 9], [11, 10], [12, 13], [14, 15]]
ftop = [[16, 17, 18], [19, 20, 21], [22, 23, 24], [25, 26, 27]]
entity_ids = {
0: dict((i, [i]) for i in range(4)),
1: dict((i, etop[i]) for i in range(6)),
2: dict((i, ftop[i]) for i in range(4)),
3: {0: [28, 29, 30, 31]},
}
elif degree == 4:
if sd == 2:
etop = [[6, 3, 7], [9, 4, 8], [10, 5, 11]]
entity_ids = {
0: dict((i, [i]) for i in range(3)),
1: dict((i, etop[i]) for i in range(3)),
2: {0: [i for i in range(12, 18)]},
}
elif degree == 5:
if sd == 2:
etop = [[9, 3, 4, 10], [12, 6, 5, 11], [13, 7, 8, 14]]
entity_ids = {
0: dict((i, [i]) for i in range(3)),
1: dict((i, etop[i]) for i in range(3)),
2: {0: [i for i in range(15, 30)]},
}
return entity_ids
def bump(T, deg):
"""Increase degree of polynomial along face/edges"""
sd = T.get_spatial_dimension()
if deg == 1:
return (0, 0)
else:
if sd == 2:
if deg < 5:
return (1, 1)
elif deg == 5:
return (2, 2)
else:
raise ValueError("Degree not supported")
elif sd == 3:
if deg < 4:
return (1, 2)
else:
raise ValueError("Degree not supported")
else:
raise ValueError("Dimension of element is not supported")
def KongMulderVeldhuizenSpace(T, deg):
sd = T.get_spatial_dimension()
if deg == 1:
return Lagrange(T, 1).poly_set
else:
L = Lagrange(T, deg)
# Toss the bubble from Lagrange since it's dependent
# on the higher-dimensional bubbles
if sd == 2:
inds = [
i
for i in range(L.space_dimension())
if i not in L.dual.entity_ids[sd][0]
]
elif sd == 3:
not_inds = [L.dual.entity_ids[sd][0]] + [
L.dual.entity_ids[sd - 1][f] for f in L.dual.entity_ids[sd - 1]
]
not_inds = [item for sublist in not_inds for item in sublist]
inds = [i for i in range(L.space_dimension()) if i not in not_inds]
RL = RestrictedElement(L, inds)
# interior cell bubble
bubs = Bubble(T, deg + bump(T, deg)[1])
if sd == 2:
return NodalEnrichedElement(RL, bubs).poly_set
elif sd == 3:
# bubble on the facet
fbubs = FacetBubble(T, deg + bump(T, deg)[0])
return NodalEnrichedElement(RL, bubs, fbubs).poly_set
class KongMulderVeldhuizenDualSet(dual_set.DualSet):
"""The dual basis for KMV simplical elements."""
def __init__(self, ref_el, degree):
entity_ids = {}
entity_ids = _get_entity_ids(ref_el, degree)
lr = create_quadrature(ref_el, degree, scheme="KMV")
nodes = [functional.PointEvaluation(ref_el, x) for x in lr.pts]
super(KongMulderVeldhuizenDualSet, self).__init__(nodes, ref_el, entity_ids)
class KongMulderVeldhuizen(finite_element.CiarletElement):
"""The "lumped" simplical finite element (NB: requires custom quad. "KMV" points to achieve a diagonal mass matrix).
References
----------
Higher-order triangular and tetrahedral finite elements with mass
lumping for solving the wave equation
M. J. S. CHIN-JOE-KONG, W. A. MULDER and M. VAN VELDHUIZEN
HIGHER-ORDER MASS-LUMPED FINITE ELEMENTS FOR THE WAVE EQUATION
W.A. MULDER
NEW HIGHER-ORDER MASS-LUMPED TETRAHEDRAL ELEMENTS
S. GEEVERS, W.A. MULDER, AND J.J.W. VAN DER VEGT
"""
def __init__(self, ref_el, degree):
if ref_el != TRIANGLE and ref_el != TETRAHEDRON:
raise ValueError("KMV is only valid for triangles and tetrahedrals")
if degree > 5 and ref_el == TRIANGLE:
raise NotImplementedError("Only P < 6 for triangles are implemented.")
if degree > 3 and ref_el == TETRAHEDRON:
raise NotImplementedError("Only P < 4 for tetrahedrals are implemented.")
S = KongMulderVeldhuizenSpace(ref_el, degree)
dual = KongMulderVeldhuizenDualSet(ref_el, degree)
formdegree = 0 # 0-form
super(KongMulderVeldhuizen, self).__init__(
S, dual, degree + max(bump(ref_el, degree)), formdegree
)
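# Editor's note: a dimension-count sketch (hypothetical helper, not part of
# FIAT). The degree-2 KMV triangle is P2 enriched with the cubic interior
# bubble, giving 6 + 1 = 7 nodes, one per KMV quadrature point.
def _demo_kmv():
    element = KongMulderVeldhuizen(TRIANGLE, 2)
    assert element.space_dimension() == 7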
fiat-2019.2.0~git20210419.7d418fa/FIAT/lagrange.py 0000664 0000000 0000000 00000003202 14135323752 0020351 0 ustar 00root root 0000000 0000000 # Copyright (C) 2008 Robert C. Kirby (Texas Tech University)
# Modified by Andrew T. T. McRae (Imperial College London)
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from FIAT import finite_element, polynomial_set, dual_set, functional
class LagrangeDualSet(dual_set.DualSet):
"""The dual basis for Lagrange elements. This class works for
simplices of any dimension. Nodes are point evaluation at
equispaced points."""
def __init__(self, ref_el, degree):
entity_ids = {}
nodes = []
# make nodes by getting points
# need to do this dimension-by-dimension, facet-by-facet
top = ref_el.get_topology()
cur = 0
for dim in sorted(top):
entity_ids[dim] = {}
for entity in sorted(top[dim]):
pts_cur = ref_el.make_points(dim, entity, degree)
nodes_cur = [functional.PointEvaluation(ref_el, x)
for x in pts_cur]
nnodes_cur = len(nodes_cur)
nodes += nodes_cur
entity_ids[dim][entity] = list(range(cur, cur + nnodes_cur))
cur += nnodes_cur
super(LagrangeDualSet, self).__init__(nodes, ref_el, entity_ids)
class Lagrange(finite_element.CiarletElement):
"""The Lagrange finite element. It is what it is."""
def __init__(self, ref_el, degree):
poly_set = polynomial_set.ONPolynomialSet(ref_el, degree)
dual = LagrangeDualSet(ref_el, degree)
formdegree = 0 # 0-form
super(Lagrange, self).__init__(poly_set, dual, degree, formdegree)
fiat-2019.2.0~git20210419.7d418fa/FIAT/mardal_tai_winther.py 0000664 0000000 0000000 00000013030 14135323752 0022426 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
"""Implementation of the Mardal-Tai-Winther finite elements."""
# Copyright (C) 2020 by Robert C. Kirby (Baylor University)
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from FIAT.finite_element import CiarletElement
from FIAT.dual_set import DualSet
from FIAT.polynomial_set import ONPolynomialSet
from FIAT.functional import (IntegralMomentOfNormalEvaluation,
IntegralMomentOfTangentialEvaluation,
IntegralLegendreNormalMoment,
IntegralMomentOfDivergence)
from FIAT.quadrature import make_quadrature
def DivergenceDubinerMoments(cell, start_deg, stop_deg, comp_deg):
onp = ONPolynomialSet(cell, stop_deg)
Q = make_quadrature(cell, comp_deg)
pts = Q.get_points()
onp = onp.tabulate(pts, 0)[0, 0]
ells = []
for ii in range((start_deg)*(start_deg+1)//2,
(stop_deg+1)*(stop_deg+2)//2):
ells.append(IntegralMomentOfDivergence(cell, Q, onp[ii, :]))
return ells
class MardalTaiWintherDual(DualSet):
"""Degrees of freedom for Mardal-Tai-Winther elements."""
def __init__(self, cell, degree):
dim = cell.get_spatial_dimension()
if not dim == 2:
raise ValueError("Mardal-Tai-Winther elements are only"
"defined in dimension 2.")
if not degree == 3:
raise ValueError("Mardal-Tai-Winther elements are only defined"
"for degree 3.")
# construct the degrees of freedoms
dofs = [] # list of functionals
# dof_ids[i][j] contains the indices of dofs that are associated with
# entity j in dim i
dof_ids = {}
# no vertex dof
dof_ids[0] = {i: [] for i in range(dim + 1)}
# edge dofs
(_dofs, _dof_ids) = self._generate_edge_dofs(cell, degree)
dofs.extend(_dofs)
dof_ids[1] = _dof_ids
# no cell dofs
dof_ids[2] = {}
dof_ids[2][0] = []
# extra dofs for enforcing div(v) constant over the cell and
# v.n linear on edges
(_dofs, _edge_dof_ids, _cell_dof_ids) = self._generate_constraint_dofs(cell, degree, len(dofs))
dofs.extend(_dofs)
for entity_id in range(3):
dof_ids[1][entity_id] = dof_ids[1][entity_id] + _edge_dof_ids[entity_id]
dof_ids[2][0] = dof_ids[2][0] + _cell_dof_ids
super(MardalTaiWintherDual, self).__init__(dofs, cell, dof_ids)
@staticmethod
def _generate_edge_dofs(cell, degree):
"""Generate dofs on edges.
On each edge, let n be its normal. We need to integrate
u.n and u.t against the first Legendre polynomial (constant)
and u.n against the second (linear).
"""
dofs = []
dof_ids = {}
offset = 0
sd = 2
facet = cell.get_facet_element()
# Facet nodes are \int_F v\cdot n p ds where p \in P_{q-1}
# degree is q - 1
Q = make_quadrature(facet, 6)
Pq = ONPolynomialSet(facet, 1)
Pq_at_qpts = Pq.tabulate(Q.get_points())[tuple([0]*(sd - 1))]
for f in range(3):
phi0 = Pq_at_qpts[0, :]
dofs.append(IntegralMomentOfNormalEvaluation(cell, Q, phi0, f))
dofs.append(IntegralMomentOfTangentialEvaluation(cell, Q, phi0, f))
phi1 = Pq_at_qpts[1, :]
dofs.append(IntegralMomentOfNormalEvaluation(cell, Q, phi1, f))
num_new_dofs = 3
dof_ids[f] = list(range(offset, offset + num_new_dofs))
offset += num_new_dofs
return (dofs, dof_ids)
@staticmethod
def _generate_constraint_dofs(cell, degree, offset):
"""
Generate constraint dofs on the cell and edges
* div(v) must be constant on the cell. Since v is a cubic and
div(v) is quadratic, we need the integral of div(v) against the
linear and quadratic Dubiner polynomials to vanish.
There are two linear and three quadratics, so these are five
constraints
* v.n must be linear on each edge. Since v.n is cubic, we need
the integral of v.n against the quadratic and cubic Legendre
polynomials to vanish on each edge.
So we introduce functionals whose kernel describes this property,
as described in the FIAT paper.
"""
dofs = []
edge_dof_ids = {}
for entity_id in range(3):
dofs += [IntegralLegendreNormalMoment(cell, entity_id, 2, 6),
IntegralLegendreNormalMoment(cell, entity_id, 3, 6)]
edge_dof_ids[entity_id] = [offset, offset+1]
offset += 2
cell_dofs = DivergenceDubinerMoments(cell, 1, 2, 6)
dofs.extend(cell_dofs)
cell_dof_ids = list(range(offset, offset+len(cell_dofs)))
return (dofs, edge_dof_ids, cell_dof_ids)
class MardalTaiWinther(CiarletElement):
"""The definition of the Mardal-Tai-Winther element.
"""
def __init__(self, cell, degree=3):
assert degree == 3, "Only defined for degree 3"
assert cell.get_spatial_dimension() == 2, "Only defined for dimension 2"
# polynomial space
Ps = ONPolynomialSet(cell, degree, (2,))
# degrees of freedom
Ls = MardalTaiWintherDual(cell, degree)
# mapping under affine transformation
mapping = "contravariant piola"
super(MardalTaiWinther, self).__init__(Ps, Ls, degree,
mapping=mapping)
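# Editor's note: a construction sketch (hypothetical helper, not part of
# FIAT). The element is vector-valued in 2D and Piola-mapped, as appropriate
# for an H(div)-conforming space.
def _demo_mtw():
    from FIAT.reference_element import ufc_simplex
    element = MardalTaiWinther(ufc_simplex(2))
    assert element.value_shape() == (2,)
    assert element.mapping()[0] == "contravariant piola"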
fiat-2019.2.0~git20210419.7d418fa/FIAT/mixed.py 0000664 0000000 0000000 00000007315 14135323752 0017710 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2010 Anders Logg
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
import numpy
from operator import add
from functools import partial
from FIAT.dual_set import DualSet
from FIAT.finite_element import FiniteElement
class MixedElement(FiniteElement):
"""A FIAT-like representation of a mixed element.
:arg elements: An iterable of FIAT elements.
:arg ref_el: The reference element (optional).
This object offers tabulation of the concatenated basis function
tables along with an entity_dofs dict."""
def __init__(self, elements, ref_el=None):
elements = tuple(elements)
cells = set(e.get_reference_element() for e in elements)
if ref_el is not None:
cells.add(ref_el)
ref_el, = cells
# These functionals are absolutely wrong, they all map from
# functions of the wrong shape, and potentially of different
# shapes. However, they are wrong precisely as FFC hacks
# expect them to be. :(
nodes = [L for e in elements for L in e.dual_basis()]
entity_dofs = concatenate_entity_dofs(ref_el, elements)
dual = DualSet(nodes, ref_el, entity_dofs)
super(MixedElement, self).__init__(ref_el, dual, None, mapping=None)
self._elements = elements
def elements(self):
return self._elements
def num_sub_elements(self):
return len(self._elements)
def value_shape(self):
return (sum(numpy.prod(e.value_shape(), dtype=int) for e in self.elements()), )
def mapping(self):
return [m for e in self._elements for m in e.mapping()]
def get_nodal_basis(self):
raise NotImplementedError("get_nodal_basis not implemented")
def tabulate(self, order, points, entity=None):
"""Tabulate a mixed element by appropriately splatting
together the tabulation of the individual elements.
"""
shape = (self.space_dimension(),) + self.value_shape() + (len(points),)
output = {}
sub_dims = [0] + list(e.space_dimension() for e in self.elements())
sub_cmps = [0] + list(numpy.prod(e.value_shape(), dtype=int)
for e in self.elements())
irange = numpy.cumsum(sub_dims)
crange = numpy.cumsum(sub_cmps)
for i, e in enumerate(self.elements()):
table = e.tabulate(order, points, entity)
for d, tab in table.items():
try:
arr = output[d]
except KeyError:
arr = numpy.zeros(shape, dtype=tab.dtype)
output[d] = arr
ir = irange[i:i+2]
cr = crange[i:i+2]
tab = tab.reshape(ir[1] - ir[0], cr[1] - cr[0], -1)
arr[slice(*ir), slice(*cr)] = tab
return output
def is_nodal(self):
"""True if primal and dual bases are orthogonal."""
return all(e.is_nodal() for e in self._elements)
def concatenate_entity_dofs(ref_el, elements):
"""Combine the entity_dofs from a list of elements into a combined
entity_dof containing the information for the concatenated DoFs of
all the elements."""
entity_dofs = {dim: {i: [] for i in entities}
for dim, entities in ref_el.get_topology().items()}
offsets = numpy.cumsum([0] + list(e.space_dimension()
for e in elements), dtype=int)
for i, d in enumerate(e.entity_dofs() for e in elements):
for dim, dofs in d.items():
for ent, off in dofs.items():
entity_dofs[dim][ent] += list(map(partial(add, offsets[i]), off))
return entity_dofs
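# Editor's note: a usage sketch (hypothetical helper, not part of FIAT).
# Two scalar P1 spaces concatenate into a 6-dof element whose value shape is
# the total number of scalar components.
def _demo_mixed():
    from FIAT.reference_element import ufc_simplex
    from FIAT.lagrange import Lagrange
    T = ufc_simplex(2)
    element = MixedElement([Lagrange(T, 1), Lagrange(T, 1)])
    assert element.space_dimension() == 6
    assert element.value_shape() == (2,)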
fiat-2019.2.0~git20210419.7d418fa/FIAT/morley.py 0000664 0000000 0000000 00000003322 14135323752 0020103 0 ustar 00root root 0000000 0000000 # Copyright (C) 2008 Robert C. Kirby (Texas Tech University)
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from FIAT import finite_element, polynomial_set, dual_set, functional
from FIAT.reference_element import TRIANGLE
class MorleyDualSet(dual_set.DualSet):
"""The dual basis for Lagrange elements. This class works for
simplices of any dimension. Nodes are point evaluation at
equispaced points."""
def __init__(self, ref_el):
entity_ids = {}
nodes = []
cur = 0
# make nodes by getting points
# need to do this dimension-by-dimension, facet-by-facet
top = ref_el.get_topology()
verts = ref_el.get_vertices()
if ref_el.get_shape() != TRIANGLE:
raise ValueError("Morley only defined on triangles")
# vertex point evaluations
entity_ids[0] = {}
for v in sorted(top[0]):
nodes.append(functional.PointEvaluation(ref_el, verts[v]))
entity_ids[0][v] = [cur]
cur += 1
# edge dof -- normal at each edge midpoint
entity_ids[1] = {}
for e in sorted(top[1]):
pt = ref_el.make_points(1, e, 2)[0]
n = functional.PointNormalDerivative(ref_el, e, pt)
nodes.append(n)
entity_ids[1][e] = [cur]
cur += 1
entity_ids[2] = {0: []}
super().__init__(nodes, ref_el, entity_ids)
class Morley(finite_element.CiarletElement):
"""The Morley finite element."""
def __init__(self, ref_el):
poly_set = polynomial_set.ONPolynomialSet(ref_el, 2)
dual = MorleyDualSet(ref_el)
super().__init__(poly_set, dual, 2)
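# Editor's note: a dimension-count sketch (hypothetical helper, not part of
# FIAT): 3 vertex values plus 3 edge-midpoint normal derivatives match
# dim P2 = 6 on the triangle.
def _demo_morley():
    from FIAT.reference_element import ufc_simplex
    element = Morley(ufc_simplex(2))
    assert element.space_dimension() == 6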
fiat-2019.2.0~git20210419.7d418fa/FIAT/nedelec.py 0000664 0000000 0000000 00000037171 14135323752 0020204 0 ustar 00root root 0000000 0000000 # Copyright (C) 2008 Robert C. Kirby (Texas Tech University)
# Modified by Andrew T. T. McRae (Imperial College London)
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from FIAT import (polynomial_set, expansions, quadrature, dual_set,
finite_element, functional)
from itertools import chain
import numpy
from FIAT.check_format_variant import check_format_variant
def NedelecSpace2D(ref_el, k):
"""Constructs a basis for the 2d H(curl) space of the first kind
which is (P_k)^2 + P_k rot( x )"""
sd = ref_el.get_spatial_dimension()
if sd != 2:
raise Exception("NedelecSpace2D requires 2d reference element")
vec_Pkp1 = polynomial_set.ONPolynomialSet(ref_el, k + 1, (sd,))
dimPkp1 = expansions.polynomial_dimension(ref_el, k + 1)
dimPk = expansions.polynomial_dimension(ref_el, k)
dimPkm1 = expansions.polynomial_dimension(ref_el, k - 1)
vec_Pk_indices = list(chain(*(range(i * dimPkp1, i * dimPkp1 + dimPk)
for i in range(sd))))
vec_Pk_from_Pkp1 = vec_Pkp1.take(vec_Pk_indices)
Pkp1 = polynomial_set.ONPolynomialSet(ref_el, k + 1)
PkH = Pkp1.take(list(range(dimPkm1, dimPk)))
Q = quadrature.make_quadrature(ref_el, 2 * k + 2)
Qpts = numpy.array(Q.get_points())
Qwts = numpy.array(Q.get_weights())
zero_index = tuple([0 for i in range(sd)])
PkH_at_Qpts = PkH.tabulate(Qpts)[zero_index]
Pkp1_at_Qpts = Pkp1.tabulate(Qpts)[zero_index]
PkH_crossx_coeffs = numpy.zeros((PkH.get_num_members(),
sd,
Pkp1.get_num_members()), "d")
def rot_x_foo(a):
if a == 0:
return 1, 1.0
elif a == 1:
return 0, -1.0
for i in range(PkH.get_num_members()):
for j in range(sd):
(ind, sign) = rot_x_foo(j)
for k in range(Pkp1.get_num_members()):
PkH_crossx_coeffs[i, j, k] = sign * sum(Qwts * PkH_at_Qpts[i, :] * Qpts[:, ind] * Pkp1_at_Qpts[k, :])
# for l in range( len( Qpts ) ):
# PkH_crossx_coeffs[i,j,k] += Qwts[ l ] \
# * PkH_at_Qpts[i,l] \
# * Qpts[l][ind] \
# * Pkp1_at_Qpts[k,l] \
# * sign
PkHcrossx = polynomial_set.PolynomialSet(ref_el,
k + 1,
k + 1,
vec_Pkp1.get_expansion_set(),
PkH_crossx_coeffs,
vec_Pkp1.get_dmats())
return polynomial_set.polynomial_set_union_normalized(vec_Pk_from_Pkp1,
PkHcrossx)
def NedelecSpace3D(ref_el, k):
"""Constructs a nodal basis for the 3d first-kind Nedelec space"""
sd = ref_el.get_spatial_dimension()
if sd != 3:
raise Exception("NedelecSpace3D requires 3d reference element")
vec_Pkp1 = polynomial_set.ONPolynomialSet(ref_el, k + 1, (sd,))
dimPkp1 = expansions.polynomial_dimension(ref_el, k + 1)
dimPk = expansions.polynomial_dimension(ref_el, k)
if k > 0:
dimPkm1 = expansions.polynomial_dimension(ref_el, k - 1)
else:
dimPkm1 = 0
vec_Pk_indices = list(chain(*(range(i * dimPkp1, i * dimPkp1 + dimPk)
for i in range(sd))))
vec_Pk = vec_Pkp1.take(vec_Pk_indices)
vec_Pke_indices = list(chain(*(range(i * dimPkp1 + dimPkm1, i * dimPkp1 + dimPk)
for i in range(sd))))
vec_Pke = vec_Pkp1.take(vec_Pke_indices)
Pkp1 = polynomial_set.ONPolynomialSet(ref_el, k + 1)
Q = quadrature.make_quadrature(ref_el, 2 * (k + 1))
Qpts = numpy.array(Q.get_points())
Qwts = numpy.array(Q.get_weights())
zero_index = tuple([0 for i in range(sd)])
PkCrossXcoeffs = numpy.zeros((vec_Pke.get_num_members(),
sd,
Pkp1.get_num_members()), "d")
Pke_qpts = vec_Pke.tabulate(Qpts)[zero_index]
Pkp1_at_Qpts = Pkp1.tabulate(Qpts)[zero_index]
for i in range(vec_Pke.get_num_members()):
for j in range(sd): # vector components
qwts_cur_bf_val = (
Qpts[:, (j + 2) % 3] * Pke_qpts[i, (j + 1) % 3, :] -
Qpts[:, (j + 1) % 3] * Pke_qpts[i, (j + 2) % 3, :]) * Qwts
PkCrossXcoeffs[i, j, :] = numpy.dot(Pkp1_at_Qpts, qwts_cur_bf_val)
# for k in range( Pkp1.get_num_members() ):
# PkCrossXcoeffs[i,j,k] = sum( Qwts * cur_bf_val * Pkp1_at_Qpts[k,:] )
# for l in range( len( Qpts ) ):
# cur_bf_val = Qpts[l][(j+2)%3] \
# * Pke_qpts[i,(j+1)%3,l] \
# - Qpts[l][(j+1)%3] \
# * Pke_qpts[i,(j+2)%3,l]
# PkCrossXcoeffs[i,j,k] += Qwts[l] \
# * cur_bf_val \
# * Pkp1_at_Qpts[k,l]
PkCrossX = polynomial_set.PolynomialSet(ref_el,
k + 1,
k + 1,
vec_Pkp1.get_expansion_set(),
PkCrossXcoeffs,
vec_Pkp1.get_dmats())
return polynomial_set.polynomial_set_union_normalized(vec_Pk, PkCrossX)
class NedelecDual2D(dual_set.DualSet):
"""Dual basis for first-kind Nedelec in 2D."""
def __init__(self, ref_el, degree, variant, quad_deg):
sd = ref_el.get_spatial_dimension()
if sd != 2:
raise Exception("Nedelec2D only works on triangles")
nodes = []
t = ref_el.get_topology()
if variant == "integral":
# edge nodes are \int_F v\cdot t p ds where p \in P_{q-1}(edge)
# degree is q - 1
edge = ref_el.get_facet_element()
Q = quadrature.make_quadrature(edge, quad_deg)
Pq = polynomial_set.ONPolynomialSet(edge, degree)
Pq_at_qpts = Pq.tabulate(Q.get_points())[tuple([0]*(sd - 1))]
for e in range(len(t[sd - 1])):
for i in range(Pq_at_qpts.shape[0]):
phi = Pq_at_qpts[i, :]
nodes.append(functional.IntegralMomentOfEdgeTangentEvaluation(ref_el, Q, phi, e))
# internal nodes. These are \int_T v \cdot p dx where p \in P_{q-2}^2
if degree > 0:
Q = quadrature.make_quadrature(ref_el, quad_deg)
qpts = Q.get_points()
Pkm1 = polynomial_set.ONPolynomialSet(ref_el, degree - 1)
zero_index = tuple([0 for i in range(sd)])
Pkm1_at_qpts = Pkm1.tabulate(qpts)[zero_index]
for d in range(sd):
for i in range(Pkm1_at_qpts.shape[0]):
phi_cur = Pkm1_at_qpts[i, :]
l_cur = functional.IntegralMoment(ref_el, Q, phi_cur, (d,), (sd,))
nodes.append(l_cur)
elif variant == "point":
num_edges = len(t[1])
# edge tangents
for i in range(num_edges):
pts_cur = ref_el.make_points(1, i, degree + 2)
for j in range(len(pts_cur)):
pt_cur = pts_cur[j]
f = functional.PointEdgeTangentEvaluation(ref_el, i, pt_cur)
nodes.append(f)
# internal moments
if degree > 0:
Q = quadrature.make_quadrature(ref_el, 2 * (degree + 1))
qpts = Q.get_points()
Pkm1 = polynomial_set.ONPolynomialSet(ref_el, degree - 1)
zero_index = tuple([0 for i in range(sd)])
Pkm1_at_qpts = Pkm1.tabulate(qpts)[zero_index]
for d in range(sd):
for i in range(Pkm1_at_qpts.shape[0]):
phi_cur = Pkm1_at_qpts[i, :]
l_cur = functional.IntegralMoment(ref_el, Q, phi_cur, (d,), (sd,))
nodes.append(l_cur)
entity_ids = {}
# set to empty
for i in range(sd + 1):
entity_ids[i] = {}
for j in range(len(t[i])):
entity_ids[i][j] = []
cur = 0
# edges
num_edge_pts = len(ref_el.make_points(1, 0, degree + 2))
for i in range(len(t[1])):
entity_ids[1][i] = list(range(cur, cur + num_edge_pts))
cur += num_edge_pts
# moments against P_{degree-1} internally, if degree > 0
if degree > 0:
num_internal_dof = sd * Pkm1_at_qpts.shape[0]
entity_ids[2][0] = list(range(cur, cur + num_internal_dof))
super(NedelecDual2D, self).__init__(nodes, ref_el, entity_ids)
class NedelecDual3D(dual_set.DualSet):
"""Dual basis for first-kind Nedelec in 3D."""
def __init__(self, ref_el, degree, variant, quad_deg):
sd = ref_el.get_spatial_dimension()
if sd != 3:
raise Exception("NedelecDual3D only works on tetrahedra")
nodes = []
t = ref_el.get_topology()
if variant == "integral":
# edge nodes are \int_F v\cdot t p ds where p \in P_{q-1}(edge)
# degree is q - 1
edge = ref_el.get_facet_element().get_facet_element()
Q = quadrature.make_quadrature(edge, quad_deg)
Pq = polynomial_set.ONPolynomialSet(edge, degree)
Pq_at_qpts = Pq.tabulate(Q.get_points())[tuple([0]*(1))]
for e in range(len(t[1])):
for i in range(Pq_at_qpts.shape[0]):
phi = Pq_at_qpts[i, :]
nodes.append(functional.IntegralMomentOfEdgeTangentEvaluation(ref_el, Q, phi, e))
# face nodes are \int_F v\cdot p dA where p \in P_{q-2}(f)^3 with p \cdot n = 0 (cmp. Monk)
# these are equivalent to dofs from Fenics book defined by
# \int_F v\times n \cdot p ds where p \in P_{q-2}(f)^2
if degree > 0:
facet = ref_el.get_facet_element()
Q = quadrature.make_quadrature(facet, quad_deg)
Pq = polynomial_set.ONPolynomialSet(facet, degree-1, (sd,))
Pq_at_qpts = Pq.tabulate(Q.get_points())[(0, 0)]
for f in range(len(t[2])):
# R is used to map [1,0,0] to tangent1 and [0,1,0] to tangent2
R = ref_el.compute_face_tangents(f)
# Skip last functionals because we only want p with p \cdot n = 0
for i in range(2 * Pq.get_num_members() // 3):
phi = Pq_at_qpts[i, ...]
phi = numpy.matmul(phi[:-1, ...].T, R)
nodes.append(functional.MonkIntegralMoment(ref_el, Q, phi, f))
# internal nodes. These are \int_T v \cdot p dx where p \in P_{q-3}^3(T)
if degree > 1:
Q = quadrature.make_quadrature(ref_el, quad_deg)
qpts = Q.get_points()
Pkm2 = polynomial_set.ONPolynomialSet(ref_el, degree - 2)
zero_index = tuple([0 for i in range(sd)])
Pkm2_at_qpts = Pkm2.tabulate(qpts)[zero_index]
for d in range(sd):
for i in range(Pkm2_at_qpts.shape[0]):
phi_cur = Pkm2_at_qpts[i, :]
l_cur = functional.IntegralMoment(ref_el, Q, phi_cur, (d,), (sd,))
nodes.append(l_cur)
elif variant == "point":
num_edges = len(t[1])
for i in range(num_edges):
# points to specify P_k on each edge
pts_cur = ref_el.make_points(1, i, degree + 2)
for j in range(len(pts_cur)):
pt_cur = pts_cur[j]
f = functional.PointEdgeTangentEvaluation(ref_el, i, pt_cur)
nodes.append(f)
if degree > 0: # face tangents
num_faces = len(t[2])
for i in range(num_faces): # loop over faces
pts_cur = ref_el.make_points(2, i, degree + 2)
for j in range(len(pts_cur)): # loop over points
pt_cur = pts_cur[j]
for k in range(2): # loop over tangents
f = functional.PointFaceTangentEvaluation(ref_el, i, k, pt_cur)
nodes.append(f)
if degree > 1: # internal moments
Q = quadrature.make_quadrature(ref_el, 2 * (degree + 1))
qpts = Q.get_points()
Pkm2 = polynomial_set.ONPolynomialSet(ref_el, degree - 2)
zero_index = tuple([0 for i in range(sd)])
Pkm2_at_qpts = Pkm2.tabulate(qpts)[zero_index]
for d in range(sd):
for i in range(Pkm2_at_qpts.shape[0]):
phi_cur = Pkm2_at_qpts[i, :]
f = functional.IntegralMoment(ref_el, Q, phi_cur, (d,), (sd,))
nodes.append(f)
entity_ids = {}
# set to empty
for i in range(sd + 1):
entity_ids[i] = {}
for j in range(len(t[i])):
entity_ids[i][j] = []
cur = 0
# edge dof
num_pts_per_edge = len(ref_el.make_points(1, 0, degree + 2))
for i in range(len(t[1])):
entity_ids[1][i] = list(range(cur, cur + num_pts_per_edge))
cur += num_pts_per_edge
# face dof
if degree > 0:
num_pts_per_face = len(ref_el.make_points(2, 0, degree + 2))
for i in range(len(t[2])):
entity_ids[2][i] = list(range(cur, cur + 2 * num_pts_per_face))
cur += 2 * num_pts_per_face
if degree > 1:
num_internal_dof = Pkm2_at_qpts.shape[0] * sd
entity_ids[3][0] = list(range(cur, cur + num_internal_dof))
super(NedelecDual3D, self).__init__(nodes, ref_el, entity_ids)
class Nedelec(finite_element.CiarletElement):
"""
Nedelec finite element
:arg ref_el: The reference element.
:arg k: The degree.
:arg variant: optional variant specifying the types of nodes.
variant can be chosen from ["point", "integral", "integral(quadrature_degree)"]
"point" -> dofs are evaluated by point evaluation. Note that this variant has suboptimal
convergence order in the H(curl)-norm
"integral" -> dofs are evaluated by quadrature rule. The quadrature degree is chosen to integrate
polynomials of degree 5*k so that most expressions will be interpolated exactly. This is important
when you want to have (nearly) curl-preserving interpolation.
"integral(quadrature_degree)" -> dofs are evaluated by quadrature rule of degree quadrature_degree
"""
def __init__(self, ref_el, k, variant=None):
degree = k - 1
(variant, quad_deg) = check_format_variant(variant, degree, "Nedelec")
if ref_el.get_spatial_dimension() == 3:
poly_set = NedelecSpace3D(ref_el, degree)
dual = NedelecDual3D(ref_el, degree, variant, quad_deg)
elif ref_el.get_spatial_dimension() == 2:
poly_set = NedelecSpace2D(ref_el, degree)
dual = NedelecDual2D(ref_el, degree, variant, quad_deg)
else:
raise Exception("Not implemented")
formdegree = 1 # 1-form
super(Nedelec, self).__init__(poly_set, dual, degree, formdegree,
mapping="covariant piola")
fiat-2019.2.0~git20210419.7d418fa/FIAT/nedelec_second_kind.py 0000664 0000000 0000000 00000023307 14135323752 0022540 0 ustar 00root root 0000000 0000000 # Copyright (C) 2010-2012 Marie E. Rognes
# Modified by Andrew T. T. McRae (Imperial College London)
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
import numpy
from FIAT.finite_element import CiarletElement
from FIAT.dual_set import DualSet
from FIAT.polynomial_set import ONPolynomialSet
from FIAT.functional import PointEdgeTangentEvaluation as Tangent
from FIAT.functional import FrobeniusIntegralMoment as IntegralMoment
from FIAT.raviart_thomas import RaviartThomas
from FIAT.quadrature import make_quadrature, UFCTetrahedronFaceQuadratureRule
from FIAT.reference_element import UFCTetrahedron
from FIAT.check_format_variant import check_format_variant
from FIAT import polynomial_set, quadrature, functional
class NedelecSecondKindDual(DualSet):
r"""
This class represents the dual basis for the Nedelec H(curl)
elements of the second kind. The degrees of freedom (L) for the
elements of the k'th degree are
d = 2:
vertices: None
edges: L(f) = f(x_i) * t for (k+1) points x_i on each edge
cell: L(f) = \int f * g * dx for g in RT_{k-1}
d = 3:
vertices: None
edges: L(f) = f(x_i) * t for (k+1) points x_i on each edge
faces: L(f) = \int_F f * g * ds for g in RT_{k-1}(F) for each face F
cell: L(f) = \int f * g * dx for g in RT_{k-2}
Higher spatial dimensions are not yet implemented. (For d = 1,
these elements coincide with the CG_k elements.)
"""
def __init__(self, cell, degree, variant, quad_deg):
# Define degrees of freedom
(dofs, ids) = self.generate_degrees_of_freedom(cell, degree, variant, quad_deg)
# Call init of super-class
super(NedelecSecondKindDual, self).__init__(dofs, cell, ids)
def generate_degrees_of_freedom(self, cell, degree, variant, quad_deg):
"Generate dofs and geometry-to-dof maps (ids)."
dofs = []
ids = {}
# Extract spatial dimension and topology
d = cell.get_spatial_dimension()
assert (d in (2, 3)), "Second kind Nedelecs only implemented in 2/3D."
# Zero vertex-based degrees of freedom (d+1 of these)
ids[0] = dict(list(zip(list(range(d + 1)), ([] for i in range(d + 1)))))
# (d+1) degrees of freedom per entity of codimension 1 (edges)
(edge_dofs, edge_ids) = self._generate_edge_dofs(cell, degree, 0, variant, quad_deg)
dofs.extend(edge_dofs)
ids[1] = edge_ids
# Include face degrees of freedom if 3D
if d == 3:
(face_dofs, face_ids) = self._generate_face_dofs(cell, degree,
len(dofs), variant, quad_deg)
dofs.extend(face_dofs)
ids[2] = face_ids
# Varying degrees of freedom (possibly zero) per cell
(cell_dofs, cell_ids) = self._generate_cell_dofs(cell, degree, len(dofs), variant, quad_deg)
dofs.extend(cell_dofs)
ids[d] = cell_ids
return (dofs, ids)
def _generate_edge_dofs(self, cell, degree, offset, variant, quad_deg):
"""Generate degrees of freedoms (dofs) for entities of
codimension 1 (edges)."""
# (degree+1) tangential component point evaluation degrees of
# freedom per entity of codimension 1 (edges)
dofs = []
ids = {}
if variant == "integral":
edge = cell.construct_subelement(1)
Q = quadrature.make_quadrature(edge, quad_deg)
Pq = polynomial_set.ONPolynomialSet(edge, degree)
Pq_at_qpts = Pq.tabulate(Q.get_points())[tuple([0]*(1))]
for e in range(len(cell.get_topology()[1])):
for i in range(Pq_at_qpts.shape[0]):
phi = Pq_at_qpts[i, :]
dofs.append(functional.IntegralMomentOfEdgeTangentEvaluation(cell, Q, phi, e))
jj = Pq_at_qpts.shape[0] * e
ids[e] = list(range(offset + jj, offset + jj + Pq_at_qpts.shape[0]))
elif variant == "point":
for edge in range(len(cell.get_topology()[1])):
# Create points for evaluation of tangential components
points = cell.make_points(1, edge, degree + 2)
# A tangential component evaluation for each point
dofs += [Tangent(cell, edge, point) for point in points]
# Associate these dofs with this edge
i = len(points) * edge
ids[edge] = list(range(offset + i, offset + i + len(points)))
return (dofs, ids)
def _generate_face_dofs(self, cell, degree, offset, variant, quad_deg):
"""Generate degrees of freedoms (dofs) for faces."""
# Initialize empty dofs and identifiers (ids)
dofs = []
ids = dict(list(zip(list(range(4)), ([] for i in range(4)))))
# Return empty info if not applicable
d = cell.get_spatial_dimension()
if (degree < 2):
return (dofs, ids)
msg = "2nd kind Nedelec face dofs only available with UFC convention"
assert isinstance(cell, UFCTetrahedron), msg
# Iterate over the faces of the tet
num_faces = len(cell.get_topology()[2])
for face in range(num_faces):
# Construct quadrature scheme for this face
m = 2 * (degree + 1)
Q_face = UFCTetrahedronFaceQuadratureRule(face, m)
# Construct Raviart-Thomas of (degree - 1) on the
# reference face
reference_face = Q_face.reference_rule().ref_el
RT = RaviartThomas(reference_face, degree - 1, variant)
num_rts = RT.space_dimension()
# Evaluate RT basis functions at reference quadrature
# points
ref_quad_points = Q_face.reference_rule().get_points()
num_quad_points = len(ref_quad_points)
Phi = RT.get_nodal_basis()
Phis = Phi.tabulate(ref_quad_points)[(0, 0)]
# Note: Phis has dimensions:
# num_basis_functions x num_components x num_quad_points
# Map Phis -> phis (reference values to physical values)
J = Q_face.jacobian()
scale = 1.0 / numpy.sqrt(numpy.linalg.det(numpy.dot(J.T, J)))
phis = numpy.ndarray((d, num_quad_points))
for i in range(num_rts):
for q in range(num_quad_points):
phi_i_q = scale * numpy.dot(J, Phis[numpy.newaxis, i, :, q].T)
for j in range(d):
phis[j, q] = phi_i_q[j]
# Construct degrees of freedom as integral moments on
# this cell, using the special face quadrature
# weighted against the values of the (physical)
# Raviart--Thomas'es on the face
dofs += [IntegralMoment(cell, Q_face, phis)]
# Assign identifiers (num RTs per face + previous edge dofs)
ids[face] = list(range(offset + num_rts*face, offset + num_rts*(face + 1)))
return (dofs, ids)
def _generate_cell_dofs(self, cell, degree, offset, variant, quad_deg):
"""Generate degrees of freedoms (dofs) for entities of
codimension d (cells)."""
# Return empty info if not applicable
d = cell.get_spatial_dimension()
if (d == 2 and degree < 2) or (d == 3 and degree < 3):
return ([], {0: []})
# Create quadrature points
Q = make_quadrature(cell, 2 * (degree + 1))
qs = Q.get_points()
# Create Raviart-Thomas nodal basis
RT = RaviartThomas(cell, degree + 1 - d, variant)
phi = RT.get_nodal_basis()
# Evaluate Raviart-Thomas basis at quadrature points
phi_at_qs = phi.tabulate(qs)[(0,) * d]
# Use (Frobenius) integral moments against RTs as dofs
dofs = [IntegralMoment(cell, Q, phi_at_qs[i, :])
for i in range(len(phi_at_qs))]
# Associate these dofs with the interior
ids = {0: list(range(offset, offset + len(dofs)))}
return (dofs, ids)
class NedelecSecondKind(CiarletElement):
"""
The H(curl) Nedelec elements of the second kind on triangles and
tetrahedra: the polynomial space described by the full polynomials
of degree k, with a suitable set of degrees of freedom to ensure
H(curl) conformity.
:arg ref_el: The reference element.
:arg k: The degree.
:arg variant: optional variant specifying the types of nodes.
variant can be chosen from ["point", "integral", "integral(quadrature_degree)"]
"point" -> dofs are evaluated by point evaluation. Note that this variant has suboptimal
convergence order in the H(curl)-norm
"integral" -> dofs are evaluated by quadrature rule. The quadrature degree is chosen to integrate
polynomials of degree 5*k so that most expressions will be interpolated exactly. This is important
when you want to have (nearly) curl-preserving interpolation.
"integral(quadrature_degree)" -> dofs are evaluated by quadrature rule of degree quadrature_degree
"""
def __init__(self, cell, k, variant=None):
(variant, quad_deg) = check_format_variant(variant, k, "Nedelec Second Kind")
# Check degree
assert k >= 1, "Second kind Nedelecs start at 1!"
# Get dimension
d = cell.get_spatial_dimension()
# Construct polynomial basis for d-vector fields
Ps = ONPolynomialSet(cell, k, (d, ))
# Construct dual space
Ls = NedelecSecondKindDual(cell, k, variant, quad_deg)
# Set form degree
formdegree = 1 # 1-form
# Set mapping
mapping = "covariant piola"
# Call init of super-class
super(NedelecSecondKind, self).__init__(Ps, Ls, k, formdegree, mapping=mapping)
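# A minimal doctest-style sketch (an added illustration, assuming the
# UFC triangle from FIAT.reference_element): the lowest-order element
# spans (P_1)^2, so it has 6 basis functions, mapped covariantly.
#
# >>> from FIAT.reference_element import UFCTriangle
# >>> N2 = NedelecSecondKind(UFCTriangle(), 1)
# >>> N2.space_dimension()
# 6
# >>> N2.mapping()[0]
# 'covariant piola'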
fiat-2019.2.0~git20210419.7d418fa/FIAT/nodal_enriched.py 0000664 0000000 0000000 00000011716 14135323752 0021540 0 ustar 00root root 0000000 0000000 # Copyright (C) 2013 Andrew T. T. McRae, 2015-2016 Jan Blechta
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
import numpy as np
from FIAT.polynomial_set import PolynomialSet
from FIAT.dual_set import DualSet
from FIAT.finite_element import CiarletElement
__all__ = ['NodalEnrichedElement']
class NodalEnrichedElement(CiarletElement):
"""NodalEnriched element is a direct sum of a sequence of
finite elements. Dual basis is reorthogonalized to the
primal basis for nodality.
The following are equivalent:
* the constructor is well-defined,
* the resulting element is unisolvent and its basis is nodal,
* the supplied elements are unisolvent with nodal basis and
their primal bases are mutually linearly independent,
* the supplied elements are unisolvent with nodal basis and
their dual bases are mutually linearly independent.
"""
def __init__(self, *elements):
# Test elements are nodal
if not all(e.is_nodal() for e in elements):
raise ValueError("Not all elements given for construction "
"of NodalEnrichedElement are nodal")
# Extract common data
ref_el = elements[0].get_reference_element()
expansion_set = elements[0].get_nodal_basis().get_expansion_set()
degree = min(e.get_nodal_basis().get_degree() for e in elements)
embedded_degree = max(e.get_nodal_basis().get_embedded_degree()
for e in elements)
order = max(e.get_order() for e in elements)
mapping = elements[0].mapping()[0]
formdegree = None if any(e.get_formdegree() is None for e in elements) \
else max(e.get_formdegree() for e in elements)
value_shape = elements[0].value_shape()
# Sanity check
assert all(e.get_nodal_basis().get_reference_element() ==
ref_el for e in elements)
assert all(type(e.get_nodal_basis().get_expansion_set()) ==
type(expansion_set) for e in elements)
assert all(e_mapping == mapping for e in elements
for e_mapping in e.mapping())
assert all(e.value_shape() == value_shape for e in elements)
# Merge polynomial sets
coeffs = _merge_coeffs([e.get_coeffs() for e in elements])
dmats = _merge_dmats([e.dmats() for e in elements])
poly_set = PolynomialSet(ref_el,
degree,
embedded_degree,
expansion_set,
coeffs,
dmats)
# Renumber dof numbers
offsets = np.cumsum([0] + [e.space_dimension() for e in elements[:-1]])
entity_ids = _merge_entity_ids((e.entity_dofs() for e in elements),
offsets)
# Merge dual bases
nodes = [node for e in elements for node in e.dual_basis()]
dual_set = DualSet(nodes, ref_el, entity_ids)
# CiarletElement constructor adjusts poly_set coefficients s.t.
# dual_set is really dual to poly_set
super(NodalEnrichedElement, self).__init__(poly_set, dual_set, order,
formdegree=formdegree, mapping=mapping)
def _merge_coeffs(coeffss):
# Number of bases members
total_dim = sum(c.shape[0] for c in coeffss)
# Value shape
value_shape = coeffss[0].shape[1:-1]
assert all(c.shape[1:-1] == value_shape for c in coeffss)
# Number of expansion polynomials
max_expansion_dim = max(c.shape[-1] for c in coeffss)
# Compose new coeffs
shape = (total_dim,) + value_shape + (max_expansion_dim,)
new_coeffs = np.zeros(shape, dtype=coeffss[0].dtype)
counter = 0
for c in coeffss:
dim = c.shape[0]
expansion_dim = c.shape[-1]
new_coeffs[counter:counter+dim, ..., :expansion_dim] = c
counter += dim
assert counter == total_dim
return new_coeffs
def _merge_dmats(dmatss):
shape, arg = max((dmats[0].shape, args) for args, dmats in enumerate(dmatss))
assert len(shape) == 2 and shape[0] == shape[1]
new_dmats = []
for dim in range(len(dmatss[arg])):
new_dmats.append(dmatss[arg][dim].copy())
for dmats in dmatss:
sl = slice(0, dmats[dim].shape[0]), slice(0, dmats[dim].shape[1])
assert np.allclose(dmats[dim], new_dmats[dim][sl]), \
"dmats of elements to be directly summed are not matching!"
return new_dmats
def _merge_entity_ids(entity_ids, offsets):
ret = {}
for i, ids in enumerate(entity_ids):
for dim in ids:
if not ret.get(dim):
ret[dim] = {}
for entity in ids[dim]:
if not ret[dim].get(entity):
ret[dim][entity] = []
ret[dim][entity] += (np.array(ids[dim][entity]) + offsets[i]).tolist()
return ret
fiat-2019.2.0~git20210419.7d418fa/FIAT/orthopoly.py 0000664 0000000 0000000 00000025217 14135323752 0020642 0 ustar 00root root 0000000 0000000 """
orthopoly.py - A suite of functions for generating orthogonal polynomials
and quadrature rules.
Copyright (c) 2014 Greg von Winckel
All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Last updated on Wed Jan 1 14:29:25 MST 2014
Modified by David A. Ham (david.ham@imperial.ac.uk), 2016
"""
import numpy as np
from functools import reduce
from math import gamma
def gauss(alpha, beta):
"""
Compute the Gauss nodes and weights from the recursion
coefficients associated with a set of orthogonal polynomials
Inputs:
alpha - recursion coefficients
beta - recursion coefficients
Outputs:
x - quadrature nodes
w - quadrature weights
Adapted from the MATLAB code by Walter Gautschi
http://www.cs.purdue.edu/archives/2002/wxg/codes/gauss.m
"""
from numpy.linalg import eigh
A = np.diag(np.sqrt(beta)[1:], 1) + np.diag(alpha)
x, V = eigh(A, "U")
w = beta[0] * np.real(np.power(V[0, :], 2))
return x, w
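# A doctest-style sketch (an added illustration): with the Legendre
# recursion coefficients from rec_jacobi (defined below), gauss()
# reproduces the classical 3-point Gauss-Legendre rule on [-1, 1].
#
# >>> alpha, beta = rec_jacobi(3, 0, 0)
# >>> x, w = gauss(alpha, beta)
# >>> np.allclose(x, [-np.sqrt(3.0 / 5.0), 0.0, np.sqrt(3.0 / 5.0)])
# True
# >>> np.allclose(w, [5.0 / 9.0, 8.0 / 9.0, 5.0 / 9.0])
# True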
def lobatto(alpha, beta, xl1, xl2):
"""
Compute the Lobatto nodes and weights with the preassigned
nodes xl1, xl2
Inputs:
alpha - recursion coefficients
beta - recursion coefficients
xl1 - assigned node location
xl2 - assigned node location
Outputs:
x - quadrature nodes
w - quadrature weights
Based on the section 7 of the paper
"Some modified matrix eigenvalue problems"
by Gene Golub, SIAM Review Vol 15, No. 2, April 1973, pp.318--334
"""
from numpy.linalg import solve
n = len(alpha) - 1
en = np.zeros(n)
en[-1] = 1
A1 = np.vstack((np.sqrt(beta), alpha - xl1))
J1 = np.diag(A1[0, 1:-1], 1) + np.diag(A1[1, 1:]) + np.diag(A1[0, 1:-1], -1)
A2 = np.vstack((np.sqrt(beta), alpha - xl2))
J2 = np.diag(A2[0, 1:-1], 1) + np.diag(A2[1, 1:]) + np.diag(A2[0, 1:-1], -1)
g1 = solve(J1, en)
g2 = solve(J2, en)
C = np.array(((1, -g1[-1]), (1, -g2[-1])))
xl = np.array((xl1, xl2))
ab = solve(C, xl)
# Copy so the caller's recursion coefficients are not mutated
alphal = alpha.copy()
alphal[-1] = ab[0]
betal = beta.copy()
betal[-1] = ab[1]
x, w = gauss(alphal, betal)
return x, w
def rec_jacobi(N, a, b):
"""
Generate the recursion coefficients alpha_k, beta_k
P_{k+1}(x) = (x-alpha_k)*P_{k}(x) - beta_k P_{k-1}(x)
for the Jacobi polynomials which are orthogonal on [-1,1]
with respect to the weight w(x)=[(1-x)^a]*[(1+x)^b]
Inputs:
N - polynomial order
a - weight parameter
b - weight parameter
Outputs:
alpha - recursion coefficients
beta - recursion coefficients
Adapted from the MATLAB code by Dirk Laurie and Walter Gautschi
http://www.cs.purdue.edu/archives/2002/wxg/codes/r_jacobi.m
"""
nu = (b - a) / float(a + b + 2)
mu = 2 ** (a + b + 1) * gamma(a + 1) * gamma(b + 1) / gamma(a + b + 2)
if N == 1:
alpha = nu
beta = mu
else:
n = np.arange(1.0, N)
nab = 2 * n + a + b
alpha = np.hstack((nu, (b ** 2 - a ** 2) / (nab * (nab + 2))))
n = n[1:]
nab = nab[1:]
B1 = 4 * (a + 1) * (b + 1) / float((a + b + 2) ** 2 * (a + b + 3))
B = 4 * (n + a) * (n + b) * n * (n + a + b) / \
(nab ** 2 * (nab + 1) * (nab - 1))
beta = np.hstack((mu, B1, B))
return alpha, beta
def rec_jacobi01(N, a, b):
"""
Generate the recursion coefficients alpha_k, beta_k
for the Jacobi polynomials which are orthogonal on [0,1]
See rec_jacobi for the recursion coefficients on [-1,1]
Inputs:
N - polynomial order
a - weight parameter
b - weight parameter
Outputs:
alpha - recursion coefficients
beta - recursion coefficients
Adapted from the MATLAB implementation:
https://www.cs.purdue.edu/archives/2002/wxg/codes/r_jacobi01.m
"""
if a <= -1 or b <= -1:
raise ValueError('''Jacobi coefficients are defined only
for alpha,beta > -1''')
if not isinstance(N, int):
raise TypeError('N must be an integer')
if N < 1:
raise ValueError('N must be at least 1')
c, d = rec_jacobi(N, a, b)
alpha = (1 + c) / 2
beta = d / 4
beta[0] = d[0] / 2 ** (a + b + 1)
return alpha, beta
def polyval(alpha, beta, x):
"""
Evaluate polynomials on x given the recursion coefficients alpha and beta
"""
N = len(alpha)
m = len(x)
P = np.zeros((m, N + 1))
P[:, 0] = 1
P[:, 1] = (x - alpha[0]) * P[:, 0]
for k in range(1, N):
P[:, k + 1] = (x - alpha[k]) * P[:, k] - beta[k] * P[:, k - 1]
return P
def jacobi(N, a, b, x, NOPT=1):
"""
JACOBI computes the Jacobi polynomials which are orthogonal on [-1,1]
with respect to the weight w(x)=[(1-x)^a]*[(1+x)^b] and evaluate them
on the given grid up to P_N(x). Setting NOPT=2 returns the
L2-normalized polynomials
"""
m = len(x)
P = np.zeros((m, N + 1))
apb = a + b
a1 = a - 1
b1 = b - 1
c = apb * (a - b)
P[:, 0] = 1
if N > 0:
P[:, 1] = 0.5 * (a - b + (apb + 2) * x)
if N > 1:
for k in range(2, N + 1):
k2 = 2 * k
g = k2 + apb
g1 = g - 1
g2 = g - 2
d = 2.0 * (k + a1) * (k + b1) * g
P[:, k] = (g1 * (c + g2 * g * x) * P[:, k - 1] -
d * P[:, k - 2]) / (k2 * (k + apb) * g2)
if NOPT == 2:
k = np.arange(N + 1)
pnorm = 2 ** (apb + 1) * gamma(k + a + 1) * gamma(k + b + 1) / \
((2 * k + a + b + 1) * (gamma(k + 1) * gamma(k + a + b + 1)))
P *= 1 / np.sqrt(pnorm)
return P
def jacobiD(N, a, b, x, NOPT=1):
"""
JACOBID computes the first derivatives of the normalized Jacobi
polynomials which are orthogonal on [-1,1] with respect
to the weight w(x)=[(1-x)^a]*[(1+x)^b] and evaluate them
on the given grid up to P_N(x). Setting NOPT=2 returns
the derivatives of the L2-normalized polynomials
"""
z = np.zeros((len(x), 1))
if N == 0:
Px = z
else:
Px = 0.5 * np.hstack((z, jacobi(N - 1, a + 1, b + 1, x, NOPT) *
((a + b + 2 + np.arange(N)))))
return Px
def mm_log(N, a):
"""
MM_LOG Modified moments for a logarithmic weight function.
The call mm=MM_LOG(n,a) computes the first n modified moments of the
logarithmic weight function w(t)=t^a log(1/t) on [0,1] relative to
shifted Legendre polynomials.
REFERENCE: Walter Gautschi,``On the preceding paper `A Legendre
polynomial integral' by James L. Blue'',
Math. Comp. 33 (1979), 742-743.
Adapted from the MATLAB implementation:
https://www.cs.purdue.edu/archives/2002/wxg/codes/mm_log.m
"""
if a <= -1:
raise ValueError('Parameter a must be greater than -1')
prod = lambda z: reduce(lambda x, y: x * y, z, 1)
mm = np.zeros(N)
c = 1
for n in range(N):
if isinstance(a, int) and a < n:
p = range(n - a, n + a + 2)
mm[n] = (-1) ** (n - a) / prod(p)
mm[n] *= gamma(a + 1) ** 2
else:
if n == 0:
mm[0] = 1 / (a + 1) ** 2
else:
k = np.arange(1, n + 1)
s = 1 / (a + 1 + k) - 1 / (a + 1 - k)
p = (a + 1 - k) / (a + 1 + k)
mm[n] = (1 / (a + 1) + sum(s)) * prod(p) / (a + 1)
mm[n] *= c
c *= 0.5 * (n + 1) / (2 * n + 1)
return mm
def mod_chebyshev(N, mom, alpham, betam):
"""
Calculate the recursion coefficients for the orthogonal polynomials
associated with a weight function that is represented in terms of its
modified moments, obtained by integrating the monic polynomials
against the weight function.
integrating the monic polynomials against the weight function.
References
----------
John C. Wheeler, "Modified moments and Gaussian quadratures"
Rocky Mountain Journal of Mathematics, Vol. 4, Num. 2 (1974), 287--296
Walter Gautschi, "Orthogonal Polynomials (in Matlab)
Journal of Computational and Applied Mathematics, Vol. 178 (2005) 215--234
Adapted from the MATLAB implementation:
https://www.cs.purdue.edu/archives/2002/wxg/codes/chebyshev.m
"""
if not isinstance(N, int):
raise TypeError('N must be an integer')
if N < 1:
raise ValueError('N must be at least 1')
N = min(N, int(len(mom) / 2))
alpha = np.zeros(N)
beta = np.zeros(N)
normsq = np.zeros(N)
sig = np.zeros((N + 1, 2 * N))
alpha[0] = alpham[0] + mom[1] / mom[0]
beta[0] = mom[0]
sig[1, :] = mom
for n in range(2, N + 1):
for m in range(n - 1, 2 * N - n + 1):
sig[n, m] = sig[n - 1, m + 1] - (alpha[n - 2] - alpham[m]) * sig[n - 1, m] - \
beta[n - 2] * sig[n - 2, m] + betam[m] * sig[n - 1, m - 1]
alpha[n - 1] = alpham[n - 1] + sig[n, n] / sig[n, n - 1] - sig[n - 1, n - 1] / \
sig[n - 1, n - 2]
beta[n - 1] = sig[n, n - 1] / sig[n - 1, n - 2]
normsq = np.diagonal(sig, -1)
return alpha, beta, normsq
def rec_jaclog(N, a):
"""
Generate the recursion coefficients alpha_k, beta_k
P_{k+1}(x) = (x-alpha_k)*P_{k}(x) - beta_k P_{k-1}(x)
for the monic polynomials which are orthogonal on [0,1]
with respect to the weight w(x)=x^a*log(1/x)
Inputs:
N - polynomial order
a - weight parameter
Outputs:
alpha - recursion coefficients
beta - recursion coefficients
Adapted from the MATLAB code:
https://www.cs.purdue.edu/archives/2002/wxg/codes/r_jaclog.m
"""
alphaj, betaj = rec_jacobi01(2 * N, 0, 0)
mom = mm_log(2 * N, a)
alpha, beta, _ = mod_chebyshev(N, mom, alphaj, betaj)
return alpha, beta
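# A doctest-style sketch (an added illustration): a 2-point rule for the
# weight w(x) = log(1/x) on [0, 1]; its weights sum to the total mass
# of the weight, which is exactly 1.
#
# >>> alpha, beta = rec_jaclog(2, 0)
# >>> x, w = gauss(alpha, beta)
# >>> abs(sum(w) - 1.0) < 1e-12
# True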
fiat-2019.2.0~git20210419.7d418fa/FIAT/pointwise_dual.py 0000664 0000000 0000000 00000005075 14135323752 0021631 0 ustar 00root root 0000000 0000000 # Copyright (C) 2020 Robert C. Kirby (Baylor University)
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
#
import numpy as np
from FIAT.functional import Functional
from FIAT.dual_set import DualSet
from collections import defaultdict
from itertools import zip_longest
def compute_pointwise_dual(el, pts):
"""Constructs a dual basis to the basis for el as a linear combination
of a set of pointwise evaluations. This is useful when the
prescribed finite element isn't Ciarlet (e.g. the basis functions
are provided explicitly as formulae). Alternatively, the element's
given dual basis may involve differentiation, making run-time
interpolation difficult in FIAT clients. The pointwise dual,
consisting only of pointwise evaluations, will effectively replace
these derivatives with (automatically determined) finite
differences. This is exact on the polynomial space, but is an
approximation if applied to functions outside the space.
:param el: a :class:`FiniteElement`.
:param pts: an iterable of points, one for each basis function (per
value component). These points must be unisolvent for the
polynomial space.
:returns: a :class:`DualSet`.
"""
nbf = el.space_dimension()
T = el.ref_el
sd = T.get_spatial_dimension()
assert np.asarray(pts).shape == (int(nbf / np.prod(el.value_shape())), sd)
z = tuple([0] * sd)
nds = []
V = el.tabulate(0, pts)[z]
# Make a square system, invert, and then put it back in the right
# shape so we have (nbf, ..., npts) with more dimensions
# for vector or tensor-valued elements.
alphas = np.linalg.inv(V.reshape((nbf, -1)).T).reshape(V.shape)
# Each row of alphas gives the coefficients of a functional,
# represented, as elsewhere in FIAT, as a summation of
# components of the input at particular points.
# This logic picks out the points and components for which the
# weights are actually nonzero to construct the functional.
pts = np.asarray(pts)
for coeffs in alphas:
pt_dict = defaultdict(list)
nonzero = np.where(np.abs(coeffs) > 1.e-12)
*comp, pt_index = nonzero
for pt, coeff_comp in zip(pts[pt_index],
zip_longest(coeffs[nonzero],
zip(*comp), fillvalue=())):
pt_dict[tuple(pt)].append(coeff_comp)
nds.append(Functional(T, el.value_shape(), dict(pt_dict), {}, "node"))
return DualSet(nds, T, el.entity_dofs())
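# A doctest-style sketch (an added illustration, assuming FIAT's Lagrange
# element): rebuilding a pointwise dual for cubic Lagrange from its own
# (unisolvent) lattice points recovers one functional per basis function.
#
# >>> from FIAT.reference_element import UFCTriangle
# >>> from FIAT.lagrange import Lagrange
# >>> T = UFCTriangle()
# >>> P3 = Lagrange(T, 3)
# >>> dual = compute_pointwise_dual(P3, T.make_lattice(3))
# >>> len(dual.get_nodes()) == P3.space_dimension()
# True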
fiat-2019.2.0~git20210419.7d418fa/FIAT/polynomial_set.py 0000664 0000000 0000000 00000023743 14135323752 0021643 0 ustar 00root root 0000000 0000000 # Copyright (C) 2008-2012 Robert C. Kirby (Texas Tech University)
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
# polynomial sets
# basic interface:
# -- defined over some reference element
# -- need to be able to tabulate (jets)
# -- type of entry: could by scalar, numpy array, or object-value
# (such as symmetric tensors, as long as they can be converted <-->
# with 1d arrays)
# Don't need the "Polynomial" class we had before, provided that
# we have an interface for defining sets of functionals (moments against
# an entire set of polynomials)
import numpy
from FIAT import expansions
from FIAT.functional import index_iterator
def mis(m, n):
"""Returns all m-tuples of nonnegative integers that sum up to n."""
if m == 1:
return [(n,)]
elif n == 0:
return [tuple([0] * m)]
else:
return [tuple([n - i] + list(foo))
for i in range(n + 1)
for foo in mis(m - 1, i)]
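# Example (an added illustration): the multi-indices of the second-order
# derivatives in two variables.
#
# >>> mis(2, 2)
# [(2, 0), (1, 1), (0, 2)]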
# We order coeffs by C_{i,j,k}
# where i is the index into the polynomial set,
# j may be an empty tuple (scalar polynomials)
# or else a vector/tensor
# k is the expansion function
# so if I have all bfs at a given point x in an array bf,
# then dot(coeffs, bf) gives the array of bfs
class PolynomialSet(object):
"""Implements a set of polynomials as linear combinations of an
expansion set over a reference element.
ref_el: the reference element
degree: an order labeling the space
embedded degree: the degree of polynomial expansion basis that
must be used to evaluate this space
coeffs: A numpy array containing the coefficients of the expansion
basis for each member of the set. Coeffs is ordered by
coeffs[i,j,k] where i is the label of the member, k is
the label of the expansion function, and j is a (possibly
empty) tuple giving the index for a vector- or tensor-valued
function.
"""
def __init__(self, ref_el, degree, embedded_degree, expansion_set, coeffs,
dmats):
self.ref_el = ref_el
self.num_members = coeffs.shape[0]
self.degree = degree
self.embedded_degree = embedded_degree
self.expansion_set = expansion_set
self.coeffs = coeffs
self.dmats = dmats
def tabulate_new(self, pts):
return numpy.dot(self.coeffs,
self.expansion_set.tabulate(self.embedded_degree, pts))
def tabulate(self, pts, jet_order=0):
"""Returns the values of the polynomial set."""
result = {}
base_vals = self.expansion_set.tabulate(self.embedded_degree, pts)
for i in range(jet_order + 1):
alphas = mis(self.ref_el.get_spatial_dimension(), i)
for alpha in alphas:
if len(self.dmats) > 0:
D = form_matrix_product(self.dmats, alpha)
else:
# special for vertex without defined point location
assert pts == [()]
D = numpy.eye(1)
result[alpha] = numpy.dot(self.coeffs,
numpy.dot(numpy.transpose(D),
base_vals))
return result
def get_expansion_set(self):
return self.expansion_set
def get_coeffs(self):
return self.coeffs
def get_num_members(self):
return self.num_members
def get_degree(self):
return self.degree
def get_embedded_degree(self):
return self.embedded_degree
def get_dmats(self):
return self.dmats
def get_reference_element(self):
return self.ref_el
def get_shape(self):
"""Returns the shape of phi(x), where () corresponds to
scalar (2,) a vector of length 2, etc"""
return self.coeffs.shape[1:-1]
def take(self, items):
"""Extracts subset of polynomials given by items."""
new_coeffs = numpy.take(self.get_coeffs(), items, 0)
return PolynomialSet(self.ref_el, self.degree, self.embedded_degree,
self.expansion_set, new_coeffs, self.dmats)
class ONPolynomialSet(PolynomialSet):
"""Constructs an orthonormal basis out of expansion set by having an
identity matrix of coefficients. Can be used to specify ON bases
for vector- and tensor-valued sets as well.
"""
def __init__(self, ref_el, degree, shape=tuple()):
if shape == tuple():
num_components = 1
else:
flat_shape = numpy.ravel(shape)
num_components = numpy.prod(flat_shape)
num_exp_functions = expansions.polynomial_dimension(ref_el, degree)
num_members = num_components * num_exp_functions
embedded_degree = degree
expansion_set = expansions.get_expansion_set(ref_el)
sd = ref_el.get_spatial_dimension()
# set up coefficients
coeffs_shape = tuple([num_members] + list(shape) + [num_exp_functions])
coeffs = numpy.zeros(coeffs_shape, "d")
# use functional's index_iterator function
cur_bf = 0
if shape == tuple():
coeffs = numpy.eye(num_members)
else:
for idx in index_iterator(shape):
n = expansions.polynomial_dimension(ref_el, embedded_degree)
for exp_bf in range(n):
cur_idx = tuple([cur_bf] + list(idx) + [exp_bf])
coeffs[cur_idx] = 1.0
cur_bf += 1
# construct dmats
if degree == 0:
dmats = [numpy.array([[0.0]], "d") for i in range(sd)]
else:
pts = ref_el.make_points(sd, 0, degree + sd + 1)
v = numpy.transpose(expansion_set.tabulate(degree, pts))
vinv = numpy.linalg.inv(v)
dv = expansion_set.tabulate_derivatives(degree, pts)
dtildes = [[[a[1][i] for a in dvrow] for dvrow in dv]
for i in range(sd)]
dmats = [numpy.dot(vinv, numpy.transpose(dtilde))
for dtilde in dtildes]
PolynomialSet.__init__(self, ref_el, degree, embedded_degree,
expansion_set, coeffs, dmats)
def project(f, U, Q):
"""Computes the expansion coefficients of f in terms of the members of
a polynomial set U. Numerical integration is performed by
quadrature rule Q.
"""
pts = Q.get_points()
wts = Q.get_weights()
f_at_qps = [f(x) for x in pts]
U_at_qps = U.tabulate(pts)
coeffs = numpy.array([sum(wts * f_at_qps * phi) for phi in U_at_qps])
return coeffs
def form_matrix_product(mats, alpha):
"""Forms product over mats[i]**alpha[i]"""
m = mats[0].shape[0]
result = numpy.eye(m)
for i in range(len(alpha)):
for j in range(alpha[i]):
result = numpy.dot(mats[i], result)
return result
def polynomial_set_union_normalized(A, B):
"""Given polynomial sets A and B, constructs a new polynomial set
whose span is the same as that of span(A) union span(B). It may
not contain any of the same members of the set, as we construct a
span via SVD.
"""
new_coeffs = numpy.array(list(A.coeffs) + list(B.coeffs))
func_shape = new_coeffs.shape[1:]
if len(func_shape) == 1:
(u, sig, vt) = numpy.linalg.svd(new_coeffs)
num_sv = len([s for s in sig if abs(s) > 1.e-10])
coeffs = vt[:num_sv]
else:
new_shape0 = new_coeffs.shape[0]
new_shape1 = numpy.prod(func_shape)
newshape = (new_shape0, new_shape1)
nc = numpy.reshape(new_coeffs, newshape)
(u, sig, vt) = numpy.linalg.svd(nc, 1)
num_sv = len([s for s in sig if abs(s) > 1.e-10])
coeffs = numpy.reshape(vt[:num_sv], tuple([num_sv] + list(func_shape)))
return PolynomialSet(A.get_reference_element(),
A.get_degree(),
A.get_embedded_degree(),
A.get_expansion_set(),
coeffs,
A.get_dmats())
class ONSymTensorPolynomialSet(PolynomialSet):
"""Constructs an orthonormal basis for symmetric-tensor-valued
polynomials on a reference element.
"""
def __init__(self, ref_el, degree, size=None):
sd = ref_el.get_spatial_dimension()
if size is None:
size = sd
shape = (size, size)
num_exp_functions = expansions.polynomial_dimension(ref_el, degree)
num_components = size * (size + 1) // 2
num_members = num_components * num_exp_functions
embedded_degree = degree
expansion_set = expansions.get_expansion_set(ref_el)
# set up coefficients for symmetric tensors
coeffs_shape = tuple([num_members] + list(shape) + [num_exp_functions])
coeffs = numpy.zeros(coeffs_shape, "d")
cur_bf = 0
for [i, j] in index_iterator(shape):
n = expansions.polynomial_dimension(ref_el, embedded_degree)
if i == j:
for exp_bf in range(n):
cur_idx = tuple([cur_bf] + [i, j] + [exp_bf])
coeffs[cur_idx] = 1.0
cur_bf += 1
elif i < j:
for exp_bf in range(n):
cur_idx = tuple([cur_bf] + [i, j] + [exp_bf])
coeffs[cur_idx] = 1.0
cur_idx = tuple([cur_bf] + [j, i] + [exp_bf])
coeffs[cur_idx] = 1.0
cur_bf += 1
# construct dmats. this is the same as ONPolynomialSet.
pts = ref_el.make_points(sd, 0, degree + sd + 1)
v = numpy.transpose(expansion_set.tabulate(degree, pts))
vinv = numpy.linalg.inv(v)
dv = expansion_set.tabulate_derivatives(degree, pts)
dtildes = [[[a[1][i] for a in dvrow] for dvrow in dv]
for i in range(sd)]
dmats = [numpy.dot(vinv, numpy.transpose(dtilde)) for dtilde in dtildes]
PolynomialSet.__init__(self, ref_el, degree, embedded_degree,
expansion_set, coeffs, dmats)
fiat-2019.2.0~git20210419.7d418fa/FIAT/quadrature.py 0000664 0000000 0000000 00000031376 14135323752 0020763 0 ustar 00root root 0000000 0000000 # Copyright (C) 2008 Robert C. Kirby (Texas Tech University)
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
#
# Modified by Marie E. Rognes (meg@simula.no), 2012
# Modified by David A. Ham (david.ham@imperial.ac.uk), 2015
import itertools
import math
import numpy
from FIAT import reference_element, expansions, jacobi, orthopoly
class QuadratureRule(object):
"""General class that models integration over a reference element
as the weighted sum of a function evaluated at a set of points."""
def __init__(self, ref_el, pts, wts):
if len(wts) != len(pts):
raise ValueError("Have %d weights, but %d points" % (len(wts), len(pts)))
self.ref_el = ref_el
self.pts = pts
self.wts = wts
def get_points(self):
return numpy.array(self.pts)
def get_weights(self):
return numpy.array(self.wts)
def integrate(self, f):
return sum([w * f(x) for (x, w) in zip(self.pts, self.wts)])
class GaussJacobiQuadratureLineRule(QuadratureRule):
"""Gauss-Jacobi quadature rule determined by Jacobi weights a and b
using m roots of m:th order Jacobi polynomial."""
def __init__(self, ref_el, m):
# this gives roots on the default (-1,1) reference element
# (xs_ref, ws_ref) = compute_gauss_jacobi_rule(a, b, m)
(xs_ref, ws_ref) = compute_gauss_jacobi_rule(0., 0., m)
Ref1 = reference_element.DefaultLine()
A, b = reference_element.make_affine_mapping(Ref1.get_vertices(),
ref_el.get_vertices())
mapping = lambda x: numpy.dot(A, x) + b
scale = numpy.linalg.det(A)
xs = tuple([tuple(mapping(x_ref)[0]) for x_ref in xs_ref])
ws = tuple([scale * w for w in ws_ref])
QuadratureRule.__init__(self, ref_el, xs, ws)
class GaussLobattoLegendreQuadratureLineRule(QuadratureRule):
"""Implement the Gauss-Lobatto-Legendre quadrature rules on the interval using
Greg von Winckel's implementation. This facilitates implementing
spectral elements.
The quadrature rule uses m points for a degree of precision of 2m-3.
"""
def __init__(self, ref_el, m):
if m < 2:
raise ValueError(
"Gauss-Labotto-Legendre quadrature invalid for fewer than 2 points")
Ref1 = reference_element.DefaultLine()
verts = Ref1.get_vertices()
if m > 2:
# Calculate the recursion coefficients.
alpha, beta = orthopoly.rec_jacobi(m, 0, 0)
xs_ref, ws_ref = orthopoly.lobatto(alpha, beta, verts[0][0], verts[1][0])
else:
# Special case for lowest order.
xs_ref = [v[0] for v in verts[:]]
ws_ref = (0.5 * (xs_ref[1] - xs_ref[0]), ) * 2
A, b = reference_element.make_affine_mapping(Ref1.get_vertices(),
ref_el.get_vertices())
mapping = lambda x: numpy.dot(A, x) + b
scale = numpy.linalg.det(A)
xs = tuple([tuple(mapping(x_ref)[0]) for x_ref in xs_ref])
ws = tuple([scale * w for w in ws_ref])
QuadratureRule.__init__(self, ref_el, xs, ws)
class GaussLegendreQuadratureLineRule(QuadratureRule):
"""Produce the Gauss--Legendre quadrature rules on the interval using
the implementation in numpy. This facilitates implementing
discontinuous spectral elements.
The quadrature rule uses m points for a degree of precision of 2m-1.
"""
def __init__(self, ref_el, m):
if m < 1:
raise ValueError(
"Gauss-Legendre quadrature invalid for fewer than 2 points")
xs_ref, ws_ref = numpy.polynomial.legendre.leggauss(m)
A, b = reference_element.make_affine_mapping(((-1.,), (1.,)),
ref_el.get_vertices())
mapping = lambda x: numpy.dot(A, x) + b
scale = numpy.linalg.det(A)
xs = tuple([tuple(mapping(x_ref)[0]) for x_ref in xs_ref])
ws = tuple([scale * w for w in ws_ref])
QuadratureRule.__init__(self, ref_el, xs, ws)
class RadauQuadratureLineRule(QuadratureRule):
"""Produce the Gauss--Radau quadrature rules on the interval using
an adaptation of Greg von Winckel's Matlab code.
The quadrature rule uses m points for a degree of precision of 2m-2.
"""
def __init__(self, ref_el, m, right=True):
assert m >= 1
N = m - 1
# Use Chebyshev-Gauss-Radau nodes as initial guess for LGR nodes
x = -numpy.cos(2 * numpy.pi * numpy.linspace(0, N, m) / (2 * N + 1))
P = numpy.zeros((N + 1, N + 2))
xold = 2
free = numpy.arange(1, N + 1, dtype='int')
while numpy.max(numpy.abs(x - xold)) > 5e-16:
xold = x.copy()
P[0, :] = (-1) ** numpy.arange(0, N + 2)
P[free, 0] = 1
P[free, 1] = x[free]
for k in range(2, N + 2):
P[free, k] = ((2 * k - 1) * x[free] * P[free, k - 1] - (k - 1) * P[free, k - 2]) / k
x[free] = xold[free] - ((1 - xold[free]) / (N + 1)) * (P[free, N] + P[free, N + 1]) / (P[free, N] - P[free, N + 1])
# The Legendre-Gauss-Radau Vandermonde
P = P[:, :-1]
# Compute the weights
w = numpy.zeros(N + 1)
w[0] = 2 / (N + 1) ** 2
w[free] = (1 - x[free])/((N + 1) * P[free, -1])**2
if right:
x = numpy.flip(-x)
w = numpy.flip(w)
xs_ref = x
ws_ref = w
A, b = reference_element.make_affine_mapping(((-1.,), (1.,)),
ref_el.get_vertices())
mapping = lambda x: numpy.dot(A, x) + b
scale = numpy.linalg.det(A)
xs = tuple([tuple(mapping(x_ref)[0]) for x_ref in xs_ref])
ws = tuple([scale * w for w in ws_ref])
QuadratureRule.__init__(self, ref_el, xs, ws)
class CollapsedQuadratureTriangleRule(QuadratureRule):
"""Implements the collapsed quadrature rules defined in
Karniadakis & Sherwin by mapping products of Gauss-Jacobi rules
from the square to the triangle."""
def __init__(self, ref_el, m):
ptx, wx = compute_gauss_jacobi_rule(0., 0., m)
pty, wy = compute_gauss_jacobi_rule(1., 0., m)
# map ptx , pty
pts_ref = [expansions.xi_triangle((x, y))
for x in ptx for y in pty]
Ref1 = reference_element.DefaultTriangle()
A, b = reference_element.make_affine_mapping(Ref1.get_vertices(),
ref_el.get_vertices())
mapping = lambda x: numpy.dot(A, x) + b
scale = numpy.linalg.det(A)
pts = tuple([tuple(mapping(x)) for x in pts_ref])
wts = [0.5 * scale * w1 * w2 for w1 in wx for w2 in wy]
QuadratureRule.__init__(self, ref_el, tuple(pts), tuple(wts))
class CollapsedQuadratureTetrahedronRule(QuadratureRule):
"""Implements the collapsed quadrature rules defined in
Karniadakis & Sherwin by mapping products of Gauss-Jacobi rules
from the cube to the tetrahedron."""
def __init__(self, ref_el, m):
ptx, wx = compute_gauss_jacobi_rule(0., 0., m)
pty, wy = compute_gauss_jacobi_rule(1., 0., m)
ptz, wz = compute_gauss_jacobi_rule(2., 0., m)
# map ptx , pty
pts_ref = [expansions.xi_tetrahedron((x, y, z))
for x in ptx for y in pty for z in ptz]
Ref1 = reference_element.DefaultTetrahedron()
A, b = reference_element.make_affine_mapping(Ref1.get_vertices(),
ref_el.get_vertices())
mapping = lambda x: numpy.dot(A, x) + b
scale = numpy.linalg.det(A)
pts = tuple([tuple(mapping(x)) for x in pts_ref])
wts = [scale * 0.125 * w1 * w2 * w3
for w1 in wx for w2 in wy for w3 in wz]
QuadratureRule.__init__(self, ref_el, tuple(pts), tuple(wts))
class UFCTetrahedronFaceQuadratureRule(QuadratureRule):
"""Highly specialized quadrature rule for the face of a
tetrahedron, mapped from a reference triangle, used for higher
order Nedelec elements"""
def __init__(self, face_number, degree):
# Create quadrature rule on reference triangle
reference_triangle = reference_element.UFCTriangle()
reference_rule = make_quadrature(reference_triangle, degree)
ref_points = reference_rule.get_points()
ref_weights = reference_rule.get_weights()
# Get geometry information about the face of interest
reference_tet = reference_element.UFCTetrahedron()
face = reference_tet.get_topology()[2][face_number]
vertices = reference_tet.get_vertices_of_subcomplex(face)
# Use tet to map points and weights on the appropriate face
vertices = [numpy.array(list(vertex)) for vertex in vertices]
x0 = vertices[0]
J = numpy.vstack([vertices[1] - x0, vertices[2] - x0]).T
# This is just a very numpyfied way of writing J*p + x0:
points = numpy.einsum("ij,kj->ki", J, ref_points) + x0
# Map weights: multiply reference weights by sqrt(|J^T J|)
detJTJ = numpy.linalg.det(numpy.dot(J.T, J))
weights = numpy.sqrt(detJTJ) * ref_weights
# Initialize super class with new points and weights
QuadratureRule.__init__(self, reference_tet, points, weights)
self._reference_rule = reference_rule
self._J = J
def reference_rule(self):
return self._reference_rule
def jacobian(self):
return self._J
def make_quadrature(ref_el, m):
"""Returns the collapsed quadrature rule using m points per
direction on the given reference element. In the tensor product
case, m is a tuple."""
if isinstance(m, tuple):
min_m = min(m)
else:
min_m = m
msg = "Expecting at least one (not %d) quadrature point per direction" % min_m
assert (min_m > 0), msg
if ref_el.get_shape() == reference_element.POINT:
return QuadratureRule(ref_el, [()], [1])
elif ref_el.get_shape() == reference_element.LINE:
return GaussJacobiQuadratureLineRule(ref_el, m)
elif ref_el.get_shape() == reference_element.TRIANGLE:
return CollapsedQuadratureTriangleRule(ref_el, m)
elif ref_el.get_shape() == reference_element.TETRAHEDRON:
return CollapsedQuadratureTetrahedronRule(ref_el, m)
elif ref_el.get_shape() == reference_element.QUADRILATERAL:
line_rule = GaussJacobiQuadratureLineRule(ref_el.construct_subelement(1), m)
return make_tensor_product_quadrature(line_rule, line_rule)
elif ref_el.get_shape() == reference_element.HEXAHEDRON:
line_rule = GaussJacobiQuadratureLineRule(ref_el.construct_subelement(1), m)
return make_tensor_product_quadrature(line_rule, line_rule, line_rule)
else:
raise ValueError("Unable to make quadrature for cell: %s" % ref_el)
def make_tensor_product_quadrature(*quad_rules):
"""Returns the quadrature rule for a TensorProduct cell, by combining
the quadrature rules of the components."""
ref_el = reference_element.TensorProductCell(*[q.ref_el
for q in quad_rules])
# Coordinates are "concatenated", weights are multiplied
pts = [list(itertools.chain(*pt_tuple))
for pt_tuple in itertools.product(*[q.pts for q in quad_rules])]
wts = [numpy.prod(wt_tuple)
for wt_tuple in itertools.product(*[q.wts for q in quad_rules])]
return QuadratureRule(ref_el, pts, wts)
# rule to get Gauss-Jacobi points
def compute_gauss_jacobi_points(a, b, m):
"""Computes the m roots of P_{m}^{a,b} on [-1,1] by Newton's method.
The initial guesses are the Chebyshev points. Algorithm
implemented in Python from the pseudocode given by Karniadakis and
Sherwin"""
x = []
eps = 1.e-8
max_iter = 100
for k in range(0, m):
r = -math.cos((2.0 * k + 1.0) * math.pi / (2.0 * m))
if k > 0:
r = 0.5 * (r + x[k - 1])
j = 0
delta = 2 * eps
while j < max_iter:
s = 0
for i in range(0, k):
s = s + 1.0 / (r - x[i])
f = jacobi.eval_jacobi(a, b, m, r)
fp = jacobi.eval_jacobi_deriv(a, b, m, r)
delta = f / (fp - f * s)
r = r - delta
if math.fabs(delta) < eps:
break
else:
j = j + 1
x.append(r)
return x
def compute_gauss_jacobi_rule(a, b, m):
xs = compute_gauss_jacobi_points(a, b, m)
a1 = math.pow(2, a + b + 1)
a2 = math.gamma(a + m + 1)
a3 = math.gamma(b + m + 1)
a4 = math.gamma(a + b + m + 1)
a5 = math.factorial(m)
a6 = a1 * a2 * a3 / a4 / a5
ws = [a6 / (1.0 - x**2.0) / jacobi.eval_jacobi_deriv(a, b, m, x)**2.0
for x in xs]
return xs, ws
fiat-2019.2.0~git20210419.7d418fa/FIAT/quadrature_element.py 0000664 0000000 0000000 00000004707 14135323752 0022472 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# Copyright (C) 2007-2016 Kristian B. Oelgaard
# Copyright (C) 2017 Miklós Homolya
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
#
# Modified by Garth N. Wells 2006-2009
import numpy
from FIAT.dual_set import DualSet
from FIAT.finite_element import FiniteElement
from FIAT.functional import PointEvaluation
class QuadratureElement(FiniteElement):
"""A set of quadrature points pretending to be a finite element."""
def __init__(self, ref_el, points, weights=None):
# Create entity dofs.
entity_dofs = {dim: {entity: [] for entity in entities}
for dim, entities in ref_el.get_topology().items()}
entity_dofs[ref_el.get_dimension()] = {0: list(range(len(points)))}
# The dual nodes are PointEvaluations at the quadrature points.
# FIXME: KBO: Check if this gives expected results for code like evaluate_dof.
nodes = [PointEvaluation(ref_el, tuple(point)) for point in points]
# Construct the dual set
dual = DualSet(nodes, ref_el, entity_dofs)
super(QuadratureElement, self).__init__(ref_el, dual, order=None)
self._points = points # save the quadrature points & weights
self._weights = weights
def value_shape(self):
"The QuadratureElement is scalar valued"
return ()
def tabulate(self, order, points, entity=None):
"""Return the identity matrix of size (num_quad_points, num_quad_points),
in a format that monomialintegration and monomialtabulation understands."""
if entity is not None and entity != (self.ref_el.get_dimension(), 0):
raise ValueError('QuadratureElement does not "tabulate" on subentities.')
# Derivatives are not defined on a QuadratureElement
if order:
raise ValueError("Derivatives are not defined on a QuadratureElement.")
# Check that incoming points are equal to the quadrature points.
if len(points) != len(self._points) or abs(numpy.array(points) - self._points).max() > 1e-12:
raise AssertionError("Mismatch of quadrature points!")
# Return the identity matrix of size len(self._points).
values = numpy.eye(len(self._points))
dim = self.ref_el.get_spatial_dimension()
return {(0,) * dim: values}
@staticmethod
def is_nodal():
# No polynomial basis, but still nodal.
return True
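# A doctest-style sketch (an added illustration): wrapping the points of
# an existing rule and tabulating at those same points returns the
# identity matrix, as described above.
#
# >>> from FIAT.reference_element import UFCTriangle
# >>> from FIAT.quadrature import make_quadrature
# >>> Q = make_quadrature(UFCTriangle(), 2)
# >>> qe = QuadratureElement(UFCTriangle(), Q.get_points(), Q.get_weights())
# >>> numpy.allclose(qe.tabulate(0, Q.get_points())[(0, 0)], numpy.eye(4))
# True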
fiat-2019.2.0~git20210419.7d418fa/FIAT/quadrature_schemes.py 0000664 0000000 0000000 00000054251 14135323752 0022467 0 ustar 00root root 0000000 0000000 """Quadrature schemes on cells
This module generates quadrature schemes on reference cells that integrate
exactly a polynomial of a given degree using a specified scheme.
Scheme options are:
scheme="default"
scheme="canonical" (collapsed Gauss scheme)
Background on the schemes:
Keast rules for tetrahedra:
Keast, P. Moderate-degree tetrahedral quadrature formulas, Computer
Methods in Applied Mechanics and Engineering 55(3):339-348, 1986.
http://dx.doi.org/10.1016/0045-7825(86)90059-9
"""
# Copyright (C) 2011 Garth N. Wells
# Copyright (C) 2016 Miklos Homolya
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
#
# First added: 2011-04-19
# Last changed: 2011-04-19
# NumPy
from numpy import array, arange, float64
# FIAT
from FIAT.reference_element import QUADRILATERAL, HEXAHEDRON, TENSORPRODUCT, UFCTriangle, UFCTetrahedron
from FIAT.quadrature import QuadratureRule, make_quadrature, make_tensor_product_quadrature
def create_quadrature(ref_el, degree, scheme="default"):
"""
Generate quadrature rule for given reference element
that will integrate a polynomial of degree 'degree' exactly.
For low-degree (<=6) polynomials on triangles and tetrahedra, this
uses hard-coded rules, otherwise it falls back to a collapsed
Gauss scheme on simplices. On tensor-product cells, it is a
tensor-product quadrature rule of the subcells.
:arg ref_el: The FIAT cell to create the quadrature for.
:arg degree: The degree of polynomial that the rule should
integrate exactly.
"""
if ref_el.get_shape() == TENSORPRODUCT:
try:
degree = tuple(degree)
except TypeError:
degree = (degree,) * len(ref_el.cells)
assert len(ref_el.cells) == len(degree)
quad_rules = [create_quadrature(c, d, scheme)
for c, d in zip(ref_el.cells, degree)]
return make_tensor_product_quadrature(*quad_rules)
if ref_el.get_shape() in [QUADRILATERAL, HEXAHEDRON]:
return create_quadrature(ref_el.product, degree, scheme)
if degree < 0:
raise ValueError("Need positive degree, not %d" % degree)
if scheme == "default":
# TODO: Point transformation to support good schemes on
# non-UFC reference elements.
if isinstance(ref_el, UFCTriangle):
return _triangle_scheme(degree)
elif isinstance(ref_el, UFCTetrahedron):
return _tetrahedron_scheme(degree)
else:
return _fiat_scheme(ref_el, degree)
elif scheme == "canonical":
return _fiat_scheme(ref_el, degree)
elif scheme == "KMV": # Kong-Mulder-Veldhuizen scheme
return _kmv_lump_scheme(ref_el, degree)
else:
raise ValueError("Unknown quadrature scheme: %s." % scheme)
def _fiat_scheme(ref_el, degree):
"""Get quadrature scheme from FIAT interface"""
# Number of points per axis for exact integration
num_points_per_axis = (degree + 1 + 1) // 2
# Create and return FIAT quadrature rule
return make_quadrature(ref_el, num_points_per_axis)
def _kmv_lump_scheme(ref_el, degree):
"""Specialized quadrature schemes for P < 6 for KMV simplical elements."""
sd = ref_el.get_spatial_dimension()
# set the unit element
if sd == 2:
T = UFCTriangle()
elif sd == 3:
T = UFCTetrahedron()
else:
raise ValueError("Dimension not supported")
if degree == 1:
x = ref_el.vertices
w = arange(sd + 1, dtype=float64)
if sd == 2:
w[:] = 1.0 / 6.0
elif sd == 3:
w[:] = 1.0 / 24.0
else:
raise ValueError("Dimension not supported")
elif degree == 2:
if sd == 2:
x = list(ref_el.vertices)
for e in range(3):
x.extend(ref_el.make_points(1, e, 2)) # edge midpoints
x.extend(ref_el.make_points(2, 0, 3)) # barycenter
w = arange(7, dtype=float64)
w[0:3] = 1.0 / 40.0
w[3:6] = 1.0 / 15.0
w[6] = 9.0 / 40.0
elif sd == 3:
x = list(ref_el.vertices)
x.extend(
[
(0.0, 0.50, 0.50),
(0.50, 0.0, 0.50),
(0.50, 0.50, 0.0),
(0.0, 0.0, 0.50),
(0.0, 0.50, 0.0),
(0.50, 0.0, 0.0),
]
)
# in facets
x.extend(
[
(0.33333333333333337, 0.3333333333333333, 0.3333333333333333),
(0.0, 0.3333333333333333, 0.3333333333333333),
(0.3333333333333333, 0.0, 0.3333333333333333),
(0.3333333333333333, 0.3333333333333333, 0.0),
]
)
# in the cell
x.extend([(1 / 4, 1 / 4, 1 / 4)])
w = arange(15, dtype=float64)
w[0:4] = 17.0 / 5040.0
w[4:10] = 2.0 / 315.0
w[10:14] = 9.0 / 560.0
w[14] = 16.0 / 315.0
else:
raise ValueError("Dimension not supported")
elif degree == 3:
if sd == 2:
alpha = 0.2934695559090401
beta = 0.2073451756635909
x = list(ref_el.vertices)
x.extend(
[
(1 - alpha, alpha),
(alpha, 1 - alpha),
(0.0, 1 - alpha),
(0.0, alpha),
(alpha, 0.0),
(1 - alpha, 0.0),
] # edge points
)
x.extend(
[(beta, beta), (1 - 2 * beta, beta), (beta, 1 - 2 * beta)]
) # points in center of cell
w = arange(12, dtype=float64)
w[0:3] = 0.007436456512410291
w[3:9] = 0.02442084061702551
w[9:12] = 0.1103885289202054
elif sd == 3:
x = list(ref_el.vertices)
x.extend(
[
(0, 0.685789657581967, 0.314210342418033),
(0, 0.314210342418033, 0.685789657581967),
(0.314210342418033, 0, 0.685789657581967),
(0.685789657581967, 0, 0.314210342418033),
(0.685789657581967, 0.314210342418033, 0.0),
(0.314210342418033, 0.685789657581967, 0.0),
(0, 0, 0.685789657581967),
(0, 0, 0.314210342418033),
(0, 0.314210342418033, 0.0),
(0, 0.685789657581967, 0.0),
(0.314210342418033, 0, 0.0),
(0.685789657581967, 0, 0.0),
]
) # 12 points on edges of facets (0-->1-->2)
x.extend(
[
(0.21548220313557542, 0.5690355937288492, 0.21548220313557542),
(0.21548220313557542, 0.21548220313557542, 0.5690355937288492),
(0.5690355937288492, 0.21548220313557542, 0.21548220313557542),
(0.0, 0.5690355937288492, 0.21548220313557542),
(0.0, 0.21548220313557542, 0.5690355937288492),
(0.0, 0.21548220313557542, 0.21548220313557542),
(0.5690355937288492, 0.0, 0.21548220313557542),
(0.21548220313557542, 0.0, 0.5690355937288492),
(0.21548220313557542, 0.0, 0.21548220313557542),
(0.5690355937288492, 0.21548220313557542, 0.0),
(0.21548220313557542, 0.5690355937288492, 0.0),
(0.21548220313557542, 0.21548220313557542, 0.0),
]
) # 12 points (3 points on each facet, 1st two parallel to edge 0)
alpha = 1 / 6
x.extend(
[
(alpha, alpha, 0.5),
(0.5, alpha, alpha),
(alpha, 0.5, alpha),
(alpha, alpha, alpha),
]
) # 4 points inside the cell
w = arange(32, dtype=float64)
w[0:4] = 0.00068688236002531922325120561367839
w[4:16] = 0.0015107814913526136472998739890272
w[16:28] = 0.0050062894680040258624242888174649
w[28:32] = 0.021428571428571428571428571428571
else:
raise ValueError("Dimension not supported")
elif degree == 4:
if sd == 2:
alpha = 0.2113248654051871 # 0.2113248654051871
beta1 = 0.4247639617258106 # 0.4247639617258106
beta2 = 0.130791593829745 # 0.130791593829745
x = list(ref_el.vertices)
for e in range(3):
x.extend(ref_el.make_points(1, e, 2)) # edge midpoints
x.extend(
[
(1 - alpha, alpha),
(alpha, 1 - alpha),
(0.0, 1 - alpha),
(0.0, alpha),
(alpha, 0.0),
(1 - alpha, 0.0),
] # edge points
)
x.extend(
[(beta1, beta1), (1 - 2 * beta1, beta1), (beta1, 1 - 2 * beta1)]
) # points in center of cell
x.extend(
[(beta2, beta2), (1 - 2 * beta2, beta2), (beta2, 1 - 2 * beta2)]
) # points in center of cell
w = arange(18, dtype=float64)
w[0:3] = 0.003174603174603175 # chk
w[3:6] = 0.0126984126984127 # chk 0.0126984126984127
w[6:12] = 0.01071428571428571 # chk 0.01071428571428571
w[12:15] = 0.07878121446939182 # chk 0.07878121446939182
w[15:18] = 0.05058386489568756 # chk 0.05058386489568756
else:
raise ValueError("Dimension not supported")
elif degree == 5:
if sd == 2:
alpha1 = 0.3632980741536860e-00
alpha2 = 0.1322645816327140e-00
beta1 = 0.4578368380791611e-00
beta2 = 0.2568591072619591e-00
beta3 = 0.5752768441141011e-01
gamma1 = 0.7819258362551702e-01
delta1 = 0.2210012187598900e-00
x = list(ref_el.vertices)
x.extend(
[
(1 - alpha1, alpha1),
(alpha1, 1 - alpha1),
(0.0, 1 - alpha1),
(0.0, alpha1),
(alpha1, 0.0),
(1 - alpha1, 0.0),
] # edge points
)
x.extend(
[
(1 - alpha2, alpha2),
(alpha2, 1 - alpha2),
(0.0, 1 - alpha2),
(0.0, alpha2),
(alpha2, 0.0),
(1 - alpha2, 0.0),
] # edge points
)
x.extend(
[(beta1, beta1), (1 - 2 * beta1, beta1), (beta1, 1 - 2 * beta1)]
) # points in center of cell
x.extend(
[(beta2, beta2), (1 - 2 * beta2, beta2), (beta2, 1 - 2 * beta2)]
) # points in center of cell
x.extend(
[(beta3, beta3), (1 - 2 * beta3, beta3), (beta3, 1 - 2 * beta3)]
) # points in center of cell
x.extend(
[
(gamma1, delta1),
(1 - gamma1 - delta1, delta1),
(gamma1, 1 - gamma1 - delta1),
(delta1, gamma1),
(1 - gamma1 - delta1, gamma1),
(delta1, 1 - gamma1 - delta1),
] # edge points
)
w = arange(30, dtype=float64)
w[0:3] = 0.7094239706792450e-03
w[3:9] = 0.6190565003676629e-02
w[9:15] = 0.3480578640489211e-02
w[15:18] = 0.3453043037728279e-01
w[18:21] = 0.4590123763076286e-01
w[21:24] = 0.1162613545961757e-01
w[24:30] = 0.2727857596999626e-01
else:
raise ValueError("Dimension not supported")
# Return scheme
return QuadratureRule(T, x, w)
def _triangle_scheme(degree):
"""Return a quadrature scheme on a triangle of specified order. Falls
back on canonical rule for higher orders."""
if degree == 0 or degree == 1:
# Scheme from Zienkiewicz and Taylor, 1 point, degree of precision 1
x = array([[1.0/3.0, 1.0/3.0]])
w = array([0.5])
elif degree == 2:
# Scheme from Strang and Fix, 3 points, degree of precision 2
x = array([[1.0/6.0, 1.0/6.0],
[1.0/6.0, 2.0/3.0],
[2.0/3.0, 1.0/6.0]])
w = arange(3, dtype=float64)
w[:] = 1.0/6.0
elif degree == 3:
# Scheme from Strang and Fix, 6 points, degree of precision 3
x = array([[0.659027622374092, 0.231933368553031],
[0.659027622374092, 0.109039009072877],
[0.231933368553031, 0.659027622374092],
[0.231933368553031, 0.109039009072877],
[0.109039009072877, 0.659027622374092],
[0.109039009072877, 0.231933368553031]])
w = arange(6, dtype=float64)
w[:] = 1.0/12.0
elif degree == 4:
# Scheme from Strang and Fix, 6 points, degree of precision 4
x = array([[0.816847572980459, 0.091576213509771],
[0.091576213509771, 0.816847572980459],
[0.091576213509771, 0.091576213509771],
[0.108103018168070, 0.445948490915965],
[0.445948490915965, 0.108103018168070],
[0.445948490915965, 0.445948490915965]])
w = arange(6, dtype=float64)
w[0:3] = 0.109951743655322
w[3:6] = 0.223381589678011
w = w/2.0
elif degree == 5:
# Scheme from Strang and Fix, 7 points, degree of precision 5
x = array([[0.33333333333333333, 0.33333333333333333],
[0.79742698535308720, 0.10128650732345633],
[0.10128650732345633, 0.79742698535308720],
[0.10128650732345633, 0.10128650732345633],
[0.05971587178976981, 0.47014206410511505],
[0.47014206410511505, 0.05971587178976981],
[0.47014206410511505, 0.47014206410511505]])
w = arange(7, dtype=float64)
w[0] = 0.22500000000000000
w[1:4] = 0.12593918054482717
w[4:7] = 0.13239415278850616
w = w/2.0
elif degree == 6:
# Scheme from Strang and Fix, 12 points, degree of precision 6
x = array([[0.873821971016996, 0.063089014491502],
[0.063089014491502, 0.873821971016996],
[0.063089014491502, 0.063089014491502],
[0.501426509658179, 0.249286745170910],
[0.249286745170910, 0.501426509658179],
[0.249286745170910, 0.249286745170910],
[0.636502499121399, 0.310352451033785],
[0.636502499121399, 0.053145049844816],
[0.310352451033785, 0.636502499121399],
[0.310352451033785, 0.053145049844816],
[0.053145049844816, 0.636502499121399],
[0.053145049844816, 0.310352451033785]])
w = arange(12, dtype=float64)
w[0:3] = 0.050844906370207
w[3:6] = 0.116786275726379
w[6:12] = 0.082851075618374
w = w/2.0
else:
# Get canonical scheme
return _fiat_scheme(UFCTriangle(), degree)
# Return scheme
return QuadratureRule(UFCTriangle(), x, w)
def _tetrahedron_scheme(degree):
"""Return a quadrature scheme on a tetrahedron of specified
degree. Falls back on canonical rule for higher orders"""
if degree == 0 or degree == 1:
# Scheme from Zienkiewicz and Taylor, 1 point, degree of precision 1
x = array([[1.0/4.0, 1.0/4.0, 1.0/4.0]])
w = array([1.0/6.0])
elif degree == 2:
# Scheme from Zienkiewicz and Taylor, 4 points, degree of precision 2
a, b = 0.585410196624969, 0.138196601125011
x = array([[a, b, b],
[b, a, b],
[b, b, a],
[b, b, b]])
w = arange(4, dtype=float64)
w[:] = 1.0/24.0
elif degree == 3:
# Scheme from Zienkiewicz and Taylor, 5 points, degree of precision 3
# Note: this scheme has a negative weight
x = array([[0.2500000000000000, 0.2500000000000000, 0.2500000000000000],
[0.5000000000000000, 0.1666666666666666, 0.1666666666666666],
[0.1666666666666666, 0.5000000000000000, 0.1666666666666666],
[0.1666666666666666, 0.1666666666666666, 0.5000000000000000],
[0.1666666666666666, 0.1666666666666666, 0.1666666666666666]])
w = arange(5, dtype=float64)
w[0] = -0.8
w[1:5] = 0.45
w = w/6.0
elif degree == 4:
# Keast rule, 14 points, degree of precision 4
# Values taken from http://people.sc.fsu.edu/~jburkardt/datasets/quadrature_rules_tet/quadrature_rules_tet.html
# (KEAST5)
x = array([[0.0000000000000000, 0.5000000000000000, 0.5000000000000000],
[0.5000000000000000, 0.0000000000000000, 0.5000000000000000],
[0.5000000000000000, 0.5000000000000000, 0.0000000000000000],
[0.5000000000000000, 0.0000000000000000, 0.0000000000000000],
[0.0000000000000000, 0.5000000000000000, 0.0000000000000000],
[0.0000000000000000, 0.0000000000000000, 0.5000000000000000],
[0.6984197043243866, 0.1005267652252045, 0.1005267652252045],
[0.1005267652252045, 0.1005267652252045, 0.1005267652252045],
[0.1005267652252045, 0.1005267652252045, 0.6984197043243866],
[0.1005267652252045, 0.6984197043243866, 0.1005267652252045],
[0.0568813795204234, 0.3143728734931922, 0.3143728734931922],
[0.3143728734931922, 0.3143728734931922, 0.3143728734931922],
[0.3143728734931922, 0.3143728734931922, 0.0568813795204234],
[0.3143728734931922, 0.0568813795204234, 0.3143728734931922]])
w = arange(14, dtype=float64)
w[0:6] = 0.0190476190476190
w[6:10] = 0.0885898247429807
w[10:14] = 0.1328387466855907
w = w/6.0
elif degree == 5:
# Keast rule, 15 points, degree of precision 5
# Values taken from http://people.sc.fsu.edu/~jburkardt/datasets/quadrature_rules_tet/quadrature_rules_tet.html
# (KEAST6)
x = array([[0.2500000000000000, 0.2500000000000000, 0.2500000000000000],
[0.0000000000000000, 0.3333333333333333, 0.3333333333333333],
[0.3333333333333333, 0.3333333333333333, 0.3333333333333333],
[0.3333333333333333, 0.3333333333333333, 0.0000000000000000],
[0.3333333333333333, 0.0000000000000000, 0.3333333333333333],
[0.7272727272727273, 0.0909090909090909, 0.0909090909090909],
[0.0909090909090909, 0.0909090909090909, 0.0909090909090909],
[0.0909090909090909, 0.0909090909090909, 0.7272727272727273],
[0.0909090909090909, 0.7272727272727273, 0.0909090909090909],
[0.4334498464263357, 0.0665501535736643, 0.0665501535736643],
[0.0665501535736643, 0.4334498464263357, 0.0665501535736643],
[0.0665501535736643, 0.0665501535736643, 0.4334498464263357],
[0.0665501535736643, 0.4334498464263357, 0.4334498464263357],
[0.4334498464263357, 0.0665501535736643, 0.4334498464263357],
[0.4334498464263357, 0.4334498464263357, 0.0665501535736643]])
w = arange(15, dtype=float64)
w[0] = 0.1817020685825351
w[1:5] = 0.0361607142857143
w[5:9] = 0.0698714945161738
w[9:15] = 0.0656948493683187
w = w/6.0
elif degree == 6:
# Keast rule, 24 points, degree of precision 6
# Values taken from http://people.sc.fsu.edu/~jburkardt/datasets/quadrature_rules_tet/quadrature_rules_tet.html
# (KEAST7)
x = array([[0.3561913862225449, 0.2146028712591517, 0.2146028712591517],
[0.2146028712591517, 0.2146028712591517, 0.2146028712591517],
[0.2146028712591517, 0.2146028712591517, 0.3561913862225449],
[0.2146028712591517, 0.3561913862225449, 0.2146028712591517],
[0.8779781243961660, 0.0406739585346113, 0.0406739585346113],
[0.0406739585346113, 0.0406739585346113, 0.0406739585346113],
[0.0406739585346113, 0.0406739585346113, 0.8779781243961660],
[0.0406739585346113, 0.8779781243961660, 0.0406739585346113],
[0.0329863295731731, 0.3223378901422757, 0.3223378901422757],
[0.3223378901422757, 0.3223378901422757, 0.3223378901422757],
[0.3223378901422757, 0.3223378901422757, 0.0329863295731731],
[0.3223378901422757, 0.0329863295731731, 0.3223378901422757],
[0.2696723314583159, 0.0636610018750175, 0.0636610018750175],
[0.0636610018750175, 0.2696723314583159, 0.0636610018750175],
[0.0636610018750175, 0.0636610018750175, 0.2696723314583159],
[0.6030056647916491, 0.0636610018750175, 0.0636610018750175],
[0.0636610018750175, 0.6030056647916491, 0.0636610018750175],
[0.0636610018750175, 0.0636610018750175, 0.6030056647916491],
[0.0636610018750175, 0.2696723314583159, 0.6030056647916491],
[0.2696723314583159, 0.6030056647916491, 0.0636610018750175],
[0.6030056647916491, 0.0636610018750175, 0.2696723314583159],
[0.0636610018750175, 0.6030056647916491, 0.2696723314583159],
[0.2696723314583159, 0.0636610018750175, 0.6030056647916491],
[0.6030056647916491, 0.2696723314583159, 0.0636610018750175]])
w = arange(24, dtype=float64)
w[0:4] = 0.0399227502581679
w[4:8] = 0.0100772110553207
w[8:12] = 0.0553571815436544
w[12:24] = 0.0482142857142857
w = w/6.0
else:
# Get canonical scheme
return _fiat_scheme(UFCTetrahedron(), degree)
# Return scheme
return QuadratureRule(UFCTetrahedron(), x, w)
fiat-2019.2.0~git20210419.7d418fa/FIAT/raviart_thomas.py 0000664 0000000 0000000 00000015623 14135323752 0021626 0 ustar 00root root 0000000 0000000 # Copyright (C) 2008-2012 Robert C. Kirby (Texas Tech University)
# Modified by Andrew T. T. McRae (Imperial College London)
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from FIAT import (expansions, polynomial_set, quadrature, dual_set,
finite_element, functional)
import numpy
from itertools import chain
from FIAT.check_format_variant import check_format_variant
def RTSpace(ref_el, deg):
"""Constructs a basis for the the Raviart-Thomas space
(P_k)^d + P_k x"""
sd = ref_el.get_spatial_dimension()
vec_Pkp1 = polynomial_set.ONPolynomialSet(ref_el, deg + 1, (sd,))
dimPkp1 = expansions.polynomial_dimension(ref_el, deg + 1)
dimPk = expansions.polynomial_dimension(ref_el, deg)
dimPkm1 = expansions.polynomial_dimension(ref_el, deg - 1)
vec_Pk_indices = list(chain(*(range(i * dimPkp1, i * dimPkp1 + dimPk)
for i in range(sd))))
vec_Pk_from_Pkp1 = vec_Pkp1.take(vec_Pk_indices)
Pkp1 = polynomial_set.ONPolynomialSet(ref_el, deg + 1)
PkH = Pkp1.take(list(range(dimPkm1, dimPk)))
Q = quadrature.make_quadrature(ref_el, 2 * deg + 2)
# have to work on this through "tabulate" interface
# first, tabulate PkH at quadrature points
Qpts = numpy.array(Q.get_points())
Qwts = numpy.array(Q.get_weights())
zero_index = tuple([0 for i in range(sd)])
PkH_at_Qpts = PkH.tabulate(Qpts)[zero_index]
Pkp1_at_Qpts = Pkp1.tabulate(Qpts)[zero_index]
PkHx_coeffs = numpy.zeros((PkH.get_num_members(),
sd,
Pkp1.get_num_members()), "d")
for i in range(PkH.get_num_members()):
for j in range(sd):
fooij = PkH_at_Qpts[i, :] * Qpts[:, j] * Qwts
PkHx_coeffs[i, j, :] = numpy.dot(Pkp1_at_Qpts, fooij)
PkHx = polynomial_set.PolynomialSet(ref_el,
deg,
deg + 1,
vec_Pkp1.get_expansion_set(),
PkHx_coeffs,
vec_Pkp1.get_dmats())
return polynomial_set.polynomial_set_union_normalized(vec_Pk_from_Pkp1, PkHx)
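# Hedged sketch of what RTSpace produces: for deg = 0 on a triangle the
# space is (P_0)^2 + x*P~_0, the 3-dimensional lowest-order RT space.
#
#   from FIAT.reference_element import ufc_simplex
#   V = RTSpace(ufc_simplex(2), 0)
#   V.get_num_members()    # -> 3, i.e. one basis function per edge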
class RTDualSet(dual_set.DualSet):
"""Dual basis for Raviart-Thomas elements consisting of point
evaluation of normals on facets of codimension 1 and internal
moments against polynomials"""
def __init__(self, ref_el, degree, variant, quad_deg):
entity_ids = {}
nodes = []
sd = ref_el.get_spatial_dimension()
t = ref_el.get_topology()
if variant == "integral":
facet = ref_el.get_facet_element()
# Facet nodes are \int_F v\cdot n p ds where p \in P_{q-1}
# degree is q - 1
Q = quadrature.make_quadrature(facet, quad_deg)
Pq = polynomial_set.ONPolynomialSet(facet, degree)
Pq_at_qpts = Pq.tabulate(Q.get_points())[tuple([0]*(sd - 1))]
for f in range(len(t[sd - 1])):
for i in range(Pq_at_qpts.shape[0]):
phi = Pq_at_qpts[i, :]
nodes.append(functional.IntegralMomentOfScaledNormalEvaluation(ref_el, Q, phi, f))
# internal nodes. These are \int_T v \cdot p dx where p \in P_{q-2}^d
if degree > 0:
Q = quadrature.make_quadrature(ref_el, quad_deg)
qpts = Q.get_points()
Pkm1 = polynomial_set.ONPolynomialSet(ref_el, degree - 1)
zero_index = tuple([0 for i in range(sd)])
Pkm1_at_qpts = Pkm1.tabulate(qpts)[zero_index]
for d in range(sd):
for i in range(Pkm1_at_qpts.shape[0]):
phi_cur = Pkm1_at_qpts[i, :]
l_cur = functional.IntegralMoment(ref_el, Q, phi_cur, (d,), (sd,))
nodes.append(l_cur)
elif variant == "point":
# codimension 1 facets
for i in range(len(t[sd - 1])):
pts_cur = ref_el.make_points(sd - 1, i, sd + degree)
for j in range(len(pts_cur)):
pt_cur = pts_cur[j]
f = functional.PointScaledNormalEvaluation(ref_el, i, pt_cur)
nodes.append(f)
# internal nodes. Let's just use points at a lattice
if degree > 0:
cpe = functional.ComponentPointEvaluation
pts = ref_el.make_points(sd, 0, degree + sd)
for d in range(sd):
for i in range(len(pts)):
l_cur = cpe(ref_el, d, (sd,), pts[i])
nodes.append(l_cur)
# sets vertices (and in 3d, edges) to have no nodes
for i in range(sd - 1):
entity_ids[i] = {}
for j in range(len(t[i])):
entity_ids[i][j] = []
cur = 0
# set codimension 1 (edges 2d, faces 3d) dof
pts_facet_0 = ref_el.make_points(sd - 1, 0, sd + degree)
pts_per_facet = len(pts_facet_0)
entity_ids[sd - 1] = {}
for i in range(len(t[sd - 1])):
entity_ids[sd - 1][i] = list(range(cur, cur + pts_per_facet))
cur += pts_per_facet
# internal nodes, if applicable
entity_ids[sd] = {0: []}
if degree > 0:
num_internal_nodes = expansions.polynomial_dimension(ref_el,
degree - 1)
entity_ids[sd][0] = list(range(cur, cur + num_internal_nodes * sd))
super(RTDualSet, self).__init__(nodes, ref_el, entity_ids)
class RaviartThomas(finite_element.CiarletElement):
"""
The Raviart-Thomas element
:arg ref_el: The reference element.
:arg k: The degree.
:arg variant: optional variant specifying the types of nodes.
variant can be chosen from ["point", "integral", "integral(quadrature_degree)"]
"point" -> dofs are evaluated by point evaluation. Note that this variant has suboptimal
convergence order in the H(div)-norm
"integral" -> dofs are evaluated by quadrature rule. The quadrature degree is chosen to integrate
polynomials of degree 5*k so that most expressions will be interpolated exactly. This is important
when you want to have (nearly) divergence-preserving interpolation.
"integral(quadrature_degree)" -> dofs are evaluated by quadrature rule of degree quadrature_degree
"""
def __init__(self, ref_el, k, variant=None):
degree = k - 1
(variant, quad_deg) = check_format_variant(variant, degree, "Raviart Thomas")
poly_set = RTSpace(ref_el, degree)
dual = RTDualSet(ref_el, degree, variant, quad_deg)
formdegree = ref_el.get_spatial_dimension() - 1 # (n-1)-form
super(RaviartThomas, self).__init__(poly_set, dual, degree, formdegree,
mapping="contravariant piola")
fiat-2019.2.0~git20210419.7d418fa/FIAT/reference_element.py 0000664 0000000 0000000 00000111470 14135323752 0022247 0 ustar 00root root 0000000 0000000 # Copyright (C) 2008 Robert C. Kirby (Texas Tech University)
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
#
# Modified by David A. Ham (david.ham@imperial.ac.uk), 2014
# Modified by Lizao Li (lzlarryli@gmail.com), 2016
"""
Abstract class and particular implementations of finite element
reference simplex geometry/topology.
Provides an abstract base class and particular implementations for the
reference simplex geometry and topology.
The rest of FIAT is abstracted over this module so that different
reference element geometry (e.g. a vertex at (0,0) versus at (-1,-1))
and orderings of entities have a single point of entry.
Currently implemented are UFC and Default Line, Triangle and Tetrahedron.
"""
from itertools import chain, product, count
from functools import reduce
from collections import defaultdict
import operator
from math import factorial
import numpy
POINT = 0
LINE = 1
TRIANGLE = 2
TETRAHEDRON = 3
QUADRILATERAL = 11
HEXAHEDRON = 111
TENSORPRODUCT = 99
def lattice_iter(start, finish, depth):
"""Generator iterating over the depth-dimensional lattice of
integers between start and (finish-1). This works on simplices in
1d, 2d, 3d, and beyond"""
if depth == 0:
return
elif depth == 1:
for ii in range(start, finish):
yield [ii]
else:
for ii in range(start, finish):
for jj in lattice_iter(start, finish - ii, depth - 1):
yield jj + [ii]
def make_lattice(verts, n, interior=0):
"""Constructs a lattice of points on the simplex defined by verts.
For example, the first-order lattice is just the vertices.
The optional argument interior specifies how many points from
the boundary to omit. For example, on a line with n = 2,
and interior = 0, this function will return the vertices and
midpoint, but with interior = 1, it will only return the
midpoint."""
vs = numpy.array(verts)
hs = (vs - vs[0])[1:, :] / n
m = hs.shape[0]
result = [tuple(vs[0] + numpy.array(indices).dot(hs))
for indices in lattice_iter(interior, n + 1 - interior, m)]
return result
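# Worked example: on the unit interval with n = 2,
#   make_lattice(((0.0,), (1.0,)), 2)    -> [(0.0,), (0.5,), (1.0,)]
#   make_lattice(((0.0,), (1.0,)), 2, 1) -> [(0.5,)]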
def linalg_subspace_intersection(A, B):
"""Computes the intersection of the subspaces spanned by the
columns of 2-dimensional arrays A,B using the algorithm found in
Golub and van Loan (3rd ed) p. 604. A should be in
R^{m,p} and B should be in R^{m,q}. Returns an orthonormal basis
for the intersection of the spaces, stored in the columns of
the result."""
# check that vectors are in same space
if A.shape[0] != B.shape[0]:
raise Exception("Dimension error")
# A,B are matrices of column vectors
# compute the intersection of span(A) and span(B)
# Compute the principal vectors/angles between the subspaces, G&vL
# p.604
(qa, _ra) = numpy.linalg.qr(A)
(qb, _rb) = numpy.linalg.qr(B)
C = numpy.dot(numpy.transpose(qa), qb)
(y, c, _zt) = numpy.linalg.svd(C)
U = numpy.dot(qa, y)
rank_c = len([s for s in c if numpy.abs(1.0 - s) < 1.e-10])
return U[:, :rank_c]
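# Hedged numeric sketch: the xy-plane and the xz-plane in R^3 intersect
# in the x-axis, so the result is a single column parallel to e_1.
#
#   A = numpy.array([[1., 0.], [0., 1.], [0., 0.]])   # span{e1, e2}
#   B = numpy.array([[1., 0.], [0., 0.], [0., 1.]])   # span{e1, e3}
#   linalg_subspace_intersection(A, B)                # shape (3, 1), ~ e1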
class Cell(object):
"""Abstract class for a reference cell. Provides accessors for
geometry (vertex coordinates) as well as topology (orderings of
vertices that make up edges, faces, etc.)."""
def __init__(self, shape, vertices, topology):
"""The constructor takes a shape code, the physical vertices expressed
as a list of tuples of numbers, and the topology of a cell.
The topology is stored as a dictionary of dictionaries t[i][j]
where i is the dimension and j is the index of the facet of
that dimension. The result is a list of the vertices
comprising the facet."""
self.shape = shape
self.vertices = vertices
self.topology = topology
# Given the topology, work out for each entity in the cell,
# which other entities it contains.
self.sub_entities = {}
for dim, entities in topology.items():
self.sub_entities[dim] = {}
for e, v in entities.items():
vertices = frozenset(v)
sub_entities = []
for dim_, entities_ in topology.items():
for e_, vertices_ in entities_.items():
if vertices.issuperset(vertices_):
sub_entities.append((dim_, e_))
# Sort for the sake of determinism and by UFC conventions
self.sub_entities[dim][e] = sorted(sub_entities)
# Build connectivity dictionary for easier queries
self.connectivity = {}
for dim0, sub_entities in self.sub_entities.items():
# Skip tensor product entities
# TODO: Can we do something better?
if isinstance(dim0, tuple):
continue
for entity, sub_sub_entities in sorted(sub_entities.items()):
for dim1 in range(dim0+1):
d01_entities = filter(lambda x: x[0] == dim1, sub_sub_entities)
d01_entities = tuple(x[1] for x in d01_entities)
self.connectivity.setdefault((dim0, dim1), []).append(d01_entities)
def _key(self):
"""Hashable object key data (excluding type)."""
# Default: only type matters
return None
def __eq__(self, other):
return type(self) == type(other) and self._key() == other._key()
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((type(self), self._key()))
def get_shape(self):
"""Returns the code for the element's shape."""
return self.shape
def get_vertices(self):
"""Returns an iterable of the element's vertices, each stored as a
tuple."""
return self.vertices
def get_spatial_dimension(self):
"""Returns the spatial dimension in which the element lives."""
return len(self.vertices[0])
def get_topology(self):
"""Returns a dictionary encoding the topology of the element.
The dictionary's keys are the spatial dimensions (0, 1, ...)
and each value is a dictionary mapping."""
return self.topology
def get_connectivity(self):
"""Returns a dictionary encoding the connectivity of the element.
The dictionary's keys are the spatial dimensions pairs ((1, 0),
(2, 0), (2, 1), ...) and each value is a list with entities
of second dimension ordered by local dim0-dim1 numbering."""
return self.connectivity
def get_vertices_of_subcomplex(self, t):
"""Returns the tuple of vertex coordinates associated with the labels
contained in the iterable t."""
return tuple([self.vertices[ti] for ti in t])
def get_dimension(self):
"""Returns the subelement dimension of the cell. For tensor
product cells, this is a tuple of dimensions for each cell in the
product. For all other cells, this is the same as the spatial
dimension."""
raise NotImplementedError("Should be implemented in a subclass.")
def construct_subelement(self, dimension):
"""Constructs the reference element of a cell subentity
specified by subelement dimension.
:arg dimension: `tuple` for tensor product cells, `int` otherwise
"""
raise NotImplementedError("Should be implemented in a subclass.")
def get_entity_transform(self, dim, entity_i):
"""Returns a mapping of point coordinates from the
`entity_i`-th subentity of dimension `dim` to the cell.
:arg dim: `tuple` for tensor product cells, `int` otherwise
:arg entity_i: entity number (integer)
"""
raise NotImplementedError("Should be implemented in a subclass.")
class Simplex(Cell):
"""Abstract class for a reference simplex."""
def compute_normal(self, facet_i):
"""Returns the unit normal vector to facet i of codimension 1."""
# Interval case
if self.get_shape() == LINE:
verts = numpy.asarray(self.vertices)
v_i, = self.get_topology()[0][facet_i]
n = verts[v_i] - verts[[1, 0][v_i]]
return n / numpy.linalg.norm(n)
# first, let's compute the span of the simplex
# This is trivial if we have a d-simplex in R^d.
# Not so otherwise.
vert_vecs = [numpy.array(v)
for v in self.vertices]
vert_vecs_foo = numpy.array([vert_vecs[i] - vert_vecs[0]
for i in range(1, len(vert_vecs))])
(u, s, vt) = numpy.linalg.svd(vert_vecs_foo)
rank = len([si for si in s if si > 1.e-10])
# this is the set of vectors that span the simplex
spanu = u[:, :rank]
t = self.get_topology()
sd = self.get_spatial_dimension()
vert_coords_of_facet = \
self.get_vertices_of_subcomplex(t[sd-1][facet_i])
# now I find everything normal to the facet.
vcf = [numpy.array(foo)
for foo in vert_coords_of_facet]
facet_span = numpy.array([vcf[i] - vcf[0]
for i in range(1, len(vcf))])
(uf, sf, vft) = numpy.linalg.svd(facet_span)
# now get the null space from vft
rankfacet = len([si for si in sf if si > 1.e-10])
facet_normal_space = numpy.transpose(vft[rankfacet:, :])
# now, I have to compute the intersection of
# facet_span with facet_normal_space
foo = linalg_subspace_intersection(facet_normal_space, spanu)
num_cols = foo.shape[1]
if num_cols != 1:
raise Exception("barf in normal computation")
# now need to get the correct sign
# get a vector in the direction
nfoo = foo[:, 0]
# what is the vertex not in the facet?
verts_set = set(t[sd][0])
verts_facet = set(t[sd - 1][facet_i])
verts_diff = verts_set.difference(verts_facet)
if len(verts_diff) != 1:
raise Exception("barf in normal computation: getting sign")
vert_off = verts_diff.pop()
vert_on = verts_facet.pop()
# get a vector from the off vertex to the facet
v_to_facet = numpy.array(self.vertices[vert_on]) \
- numpy.array(self.vertices[vert_off])
if numpy.dot(v_to_facet, nfoo) > 0.0:
return nfoo
else:
return -nfoo
def compute_tangents(self, dim, i):
"""Computes tangents in any dimension based on differences
between vertices and the first vertex of the i-th facet
of dimension dim. Returns a (possibly empty) list.
These tangents are *NOT* normalized to have unit length."""
t = self.get_topology()
vs = list(map(numpy.array, self.get_vertices_of_subcomplex(t[dim][i])))
ts = [v - vs[0] for v in vs[1:]]
return ts
def compute_normalized_tangents(self, dim, i):
"""Computes tangents in any dimension based on differences
between vertices and the first vertex of the i-th facet
of dimension dim. Returns a (possibly empty) list.
These tangents are normalized to have unit length."""
ts = self.compute_tangents(dim, i)
return [t / numpy.linalg.norm(t) for t in ts]
def compute_edge_tangent(self, edge_i):
"""Computes the nonnormalized tangent to a 1-dimensional facet.
returns a single vector."""
t = self.get_topology()
(v0, v1) = self.get_vertices_of_subcomplex(t[1][edge_i])
return numpy.array(v1) - numpy.array(v0)
def compute_normalized_edge_tangent(self, edge_i):
"""Computes the unit tangent vector to a 1-dimensional facet"""
v = self.compute_edge_tangent(edge_i)
return v / numpy.linalg.norm(v)
def compute_face_tangents(self, face_i):
"""Computes the two tangents to a face. Only implemented
for a tetrahedron."""
if self.get_spatial_dimension() != 3:
raise Exception("can't get face tangents yet")
t = self.get_topology()
(v0, v1, v2) = list(map(numpy.array,
self.get_vertices_of_subcomplex(t[2][face_i])))
return (v1 - v0, v2 - v0)
def compute_face_edge_tangents(self, dim, entity_id):
"""Computes all the edge tangents of any k-face with k>=1.
The result is an array of binom(dim+1,2) vectors.
This agrees with `compute_edge_tangent` when dim=1.
"""
vert_ids = self.get_topology()[dim][entity_id]
vert_coords = [numpy.array(x)
for x in self.get_vertices_of_subcomplex(vert_ids)]
edge_ts = []
for source in range(dim):
for dest in range(source + 1, dim + 1):
edge_ts.append(vert_coords[dest] - vert_coords[source])
return edge_ts
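# Hedged sketch: on a tetrahedron face (dim = 2) this yields
# binom(3, 2) = 3 edge tangents, e.g.
#   len(UFCTetrahedron().compute_face_edge_tangents(2, 0))   # -> 3
# (UFCTetrahedron is defined further down in this module.)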
def make_points(self, dim, entity_id, order):
"""Constructs a lattice of points on the entity_id:th
facet of dimension dim. Order indicates how many points to
include in each direction."""
if dim == 0:
return (self.get_vertices()[entity_id], )
elif 0 < dim < self.get_spatial_dimension():
entity_verts = \
self.get_vertices_of_subcomplex(
self.get_topology()[dim][entity_id])
return make_lattice(entity_verts, order, 1)
elif dim == self.get_spatial_dimension():
return make_lattice(self.get_vertices(), order, 1)
else:
raise ValueError("illegal dimension")
def volume(self):
"""Computes the volume of the simplex in the appropriate
dimensional measure."""
return volume(self.get_vertices())
def volume_of_subcomplex(self, dim, facet_no):
vids = self.topology[dim][facet_no]
return volume(self.get_vertices_of_subcomplex(vids))
def compute_scaled_normal(self, facet_i):
"""Returns the unit normal to facet_i of scaled by the
volume of that facet."""
dim = self.get_spatial_dimension()
v = self.volume_of_subcomplex(dim - 1, facet_i)
return self.compute_normal(facet_i) * v
def compute_reference_normal(self, facet_dim, facet_i):
"""Returns the unit normal in infinity norm to facet_i."""
assert facet_dim == self.get_spatial_dimension() - 1
n = Simplex.compute_normal(self, facet_i) # skip UFC overrides
return n / numpy.linalg.norm(n, numpy.inf)
def get_entity_transform(self, dim, entity):
"""Returns a mapping of point coordinates from the
`entity`-th subentity of dimension `dim` to the cell.
:arg dim: subentity dimension (integer)
:arg entity: entity number (integer)
"""
topology = self.get_topology()
celldim = self.get_spatial_dimension()
codim = celldim - dim
if dim == 0:
# Special case vertices.
i, = topology[dim][entity]
vertex = self.get_vertices()[i]
return lambda point: vertex
elif dim == celldim:
assert entity == 0
return lambda point: point
try:
subcell = self.construct_subelement(dim)
except NotImplementedError:
# Special case for 1D elements.
x_c, = self.get_vertices_of_subcomplex(topology[0][entity])
return lambda x: x_c
subdim = subcell.get_spatial_dimension()
assert subdim == celldim - codim
# Entity vertices in entity space.
v_e = numpy.asarray(subcell.get_vertices())
A = numpy.zeros([subdim, subdim])
for i in range(subdim):
A[i, :] = (v_e[i + 1] - v_e[0])
A[i, :] /= A[i, :].dot(A[i, :])
# Entity vertices in cell space.
v_c = numpy.asarray(self.get_vertices_of_subcomplex(topology[dim][entity]))
B = numpy.zeros([celldim, subdim])
for j in range(subdim):
B[:, j] = (v_c[j + 1] - v_c[0])
C = B.dot(A)
offset = v_c[0] - C.dot(v_e[0])
return lambda x: offset + C.dot(x)
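# Hedged sketch: on the UFC triangle (defined below), edge 0 joins
# vertices 1 and 2, so the midpoint of the reference interval maps to
# the midpoint of that edge:
#   phi = UFCTriangle().get_entity_transform(1, 0)
#   phi((0.5,))    # -> array([0.5, 0.5])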
def get_dimension(self):
"""Returns the subelement dimension of the cell. Same as the
spatial dimension."""
return self.get_spatial_dimension()
# Backwards compatible name
ReferenceElement = Simplex
class UFCSimplex(Simplex):
def get_facet_element(self):
dimension = self.get_spatial_dimension()
return self.construct_subelement(dimension - 1)
def construct_subelement(self, dimension):
"""Constructs the reference element of a cell subentity
specified by subelement dimension.
:arg dimension: subentity dimension (integer)
"""
return ufc_simplex(dimension)
def contains_point(self, point, epsilon=0):
"""Checks if reference cell contains given point
(with numerical tolerance)."""
result = (sum(point) - epsilon <= 1)
for c in point:
result &= (c + epsilon >= 0)
return result
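# Worked example (using the UFC triangle defined below):
#   UFCTriangle().contains_point((0.3, 0.3))   # -> True
#   UFCTriangle().contains_point((0.7, 0.7))   # -> False, since x + y > 1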
class Point(Simplex):
"""This is the reference point."""
def __init__(self):
verts = ((),)
topology = {0: {0: (0,)}}
super(Point, self).__init__(POINT, verts, topology)
def construct_subelement(self, dimension):
"""Constructs the reference element of a cell subentity
specified by subelement dimension.
:arg dimension: subentity dimension (integer). Must be zero.
"""
assert dimension == 0
return self
class DefaultLine(Simplex):
"""This is the reference line with vertices (-1.0,) and (1.0,)."""
def __init__(self):
verts = ((-1.0,), (1.0,))
edges = {0: (0, 1)}
topology = {0: {0: (0,), 1: (1,)},
1: edges}
super(DefaultLine, self).__init__(LINE, verts, topology)
def get_facet_element(self):
raise NotImplementedError()
class UFCInterval(UFCSimplex):
"""This is the reference interval with vertices (0.0,) and (1.0,)."""
def __init__(self):
verts = ((0.0,), (1.0,))
edges = {0: (0, 1)}
topology = {0: {0: (0,), 1: (1,)},
1: edges}
super(UFCInterval, self).__init__(LINE, verts, topology)
class DefaultTriangle(Simplex):
"""This is the reference triangle with vertices (-1.0,-1.0),
(1.0,-1.0), and (-1.0,1.0)."""
def __init__(self):
verts = ((-1.0, -1.0), (1.0, -1.0), (-1.0, 1.0))
edges = {0: (1, 2),
1: (2, 0),
2: (0, 1)}
faces = {0: (0, 1, 2)}
topology = {0: {0: (0,), 1: (1,), 2: (2,)},
1: edges, 2: faces}
super(DefaultTriangle, self).__init__(TRIANGLE, verts, topology)
def get_facet_element(self):
return DefaultLine()
class UFCTriangle(UFCSimplex):
"""This is the reference triangle with vertices (0.0,0.0),
(1.0,0.0), and (0.0,1.0)."""
def __init__(self):
verts = ((0.0, 0.0), (1.0, 0.0), (0.0, 1.0))
edges = {0: (1, 2), 1: (0, 2), 2: (0, 1)}
faces = {0: (0, 1, 2)}
topology = {0: {0: (0,), 1: (1,), 2: (2,)},
1: edges, 2: faces}
super(UFCTriangle, self).__init__(TRIANGLE, verts, topology)
def compute_normal(self, i):
"UFC consistent normal"
t = self.compute_tangents(1, i)[0]
n = numpy.array((t[1], -t[0]))
return n / numpy.linalg.norm(n)
class IntrepidTriangle(Simplex):
"""This is the Intrepid triangle with vertices (0,0),(1,0),(0,1)"""
def __init__(self):
verts = ((0.0, 0.0), (1.0, 0.0), (0.0, 1.0))
edges = {0: (0, 1),
1: (1, 2),
2: (2, 0)}
faces = {0: (0, 1, 2)}
topology = {0: {0: (0,), 1: (1,), 2: (2,)},
1: edges, 2: faces}
super(IntrepidTriangle, self).__init__(TRIANGLE, verts, topology)
def get_facet_element(self):
# I think the UFC interval is equivalent to what the
# IntrepidInterval would be.
return UFCInterval()
class DefaultTetrahedron(Simplex):
"""This is the reference tetrahedron with vertices (-1,-1,-1),
(1,-1,-1),(-1,1,-1), and (-1,-1,1)."""
def __init__(self):
verts = ((-1.0, -1.0, -1.0), (1.0, -1.0, -1.0),
(-1.0, 1.0, -1.0), (-1.0, -1.0, 1.0))
vs = {0: (0, ),
1: (1, ),
2: (2, ),
3: (3, )}
edges = {0: (1, 2),
1: (2, 0),
2: (0, 1),
3: (0, 3),
4: (1, 3),
5: (2, 3)}
faces = {0: (1, 3, 2),
1: (2, 3, 0),
2: (3, 1, 0),
3: (0, 1, 2)}
tets = {0: (0, 1, 2, 3)}
topology = {0: vs, 1: edges, 2: faces, 3: tets}
super(DefaultTetrahedron, self).__init__(TETRAHEDRON, verts, topology)
def get_facet_element(self):
return DefaultTriangle()
class IntrepidTetrahedron(Simplex):
"""This is the reference tetrahedron with vertices (0,0,0),
(1,0,0),(0,1,0), and (0,0,1) used in the Intrepid project."""
def __init__(self):
verts = ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0))
vs = {0: (0, ),
1: (1, ),
2: (2, ),
3: (3, )}
edges = {0: (0, 1),
1: (1, 2),
2: (2, 0),
3: (0, 3),
4: (1, 3),
5: (2, 3)}
faces = {0: (0, 1, 3),
1: (1, 2, 3),
2: (0, 3, 2),
3: (0, 2, 1)}
tets = {0: (0, 1, 2, 3)}
topology = {0: vs, 1: edges, 2: faces, 3: tets}
super(IntrepidTetrahedron, self).__init__(TETRAHEDRON, verts, topology)
def get_facet_element(self):
return IntrepidTriangle()
class UFCTetrahedron(UFCSimplex):
"""This is the reference tetrahedron with vertices (0,0,0),
(1,0,0),(0,1,0), and (0,0,1)."""
def __init__(self):
verts = ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0))
vs = {0: (0, ),
1: (1, ),
2: (2, ),
3: (3, )}
edges = {0: (2, 3),
1: (1, 3),
2: (1, 2),
3: (0, 3),
4: (0, 2),
5: (0, 1)}
faces = {0: (1, 2, 3),
1: (0, 2, 3),
2: (0, 1, 3),
3: (0, 1, 2)}
tets = {0: (0, 1, 2, 3)}
topology = {0: vs, 1: edges, 2: faces, 3: tets}
super(UFCTetrahedron, self).__init__(TETRAHEDRON, verts, topology)
def compute_normal(self, i):
"UFC consistent normals."
t = self.compute_tangents(2, i)
n = numpy.cross(t[0], t[1])
return -2.0 * n / numpy.linalg.norm(n)
class TensorProductCell(Cell):
"""A cell that is the product of FIAT cells."""
def __init__(self, *cells):
# Vertices
vertices = tuple(tuple(chain(*coords))
for coords in product(*[cell.get_vertices()
for cell in cells]))
# Topology
shape = tuple(len(c.get_vertices()) for c in cells)
topology = {}
for dim in product(*[cell.get_topology().keys()
for cell in cells]):
topology[dim] = {}
topds = [cell.get_topology()[d]
for cell, d in zip(cells, dim)]
for tuple_ei in product(*[sorted(topd) for topd in topds]):
tuple_vs = list(product(*[topd[ei]
for topd, ei in zip(topds, tuple_ei)]))
vs = tuple(numpy.ravel_multi_index(numpy.transpose(tuple_vs), shape))
topology[dim][tuple_ei] = vs
# flatten entity numbers
topology[dim] = dict(enumerate(topology[dim][key]
for key in sorted(topology[dim])))
super(TensorProductCell, self).__init__(TENSORPRODUCT, vertices, topology)
self.cells = tuple(cells)
def _key(self):
return self.cells
@staticmethod
def _split_slices(lengths):
n = len(lengths)
delimiter = [0] * (n + 1)
for i in range(n):
delimiter[i + 1] = delimiter[i] + lengths[i]
return [slice(delimiter[i], delimiter[i+1])
for i in range(n)]
def get_dimension(self):
"""Returns the subelement dimension of the cell, a tuple of
dimensions for each cell in the product."""
return tuple(c.get_dimension() for c in self.cells)
def construct_subelement(self, dimension):
"""Constructs the reference element of a cell subentity
specified by subelement dimension.
:arg dimension: dimension in each "direction" (tuple)
"""
return TensorProductCell(*[c.construct_subelement(d)
for c, d in zip(self.cells, dimension)])
def get_entity_transform(self, dim, entity_i):
"""Returns a mapping of point coordinates from the
`entity_i`-th subentity of dimension `dim` to the cell.
:arg dim: subelement dimension (tuple)
:arg entity_i: entity number (integer)
"""
# unravel entity_i
shape = tuple(len(c.get_topology()[d])
for c, d in zip(self.cells, dim))
alpha = numpy.unravel_index(entity_i, shape)
# entity transform on each subcell
sct = [c.get_entity_transform(d, i)
for c, d, i in zip(self.cells, dim, alpha)]
slices = TensorProductCell._split_slices(dim)
def transform(point):
return list(chain(*[t(point[s])
for t, s in zip(sct, slices)]))
return transform
def volume(self):
"""Computes the volume in the appropriate dimensional measure."""
return numpy.prod([c.volume() for c in self.cells])
def compute_reference_normal(self, facet_dim, facet_i):
"""Returns the unit normal in infinity norm to facet_i of
subelement dimension facet_dim."""
assert len(facet_dim) == len(self.get_dimension())
indicator = numpy.array(self.get_dimension()) - numpy.array(facet_dim)
(cell_i,), = numpy.nonzero(indicator)
n = []
for i, c in enumerate(self.cells):
if cell_i == i:
n.extend(c.compute_reference_normal(facet_dim[i], facet_i))
else:
n.extend([0] * c.get_spatial_dimension())
return numpy.asarray(n)
def contains_point(self, point, epsilon=0):
"""Checks if reference cell contains given point
(with numerical tolerance)."""
lengths = [c.get_spatial_dimension() for c in self.cells]
assert len(point) == sum(lengths)
slices = TensorProductCell._split_slices(lengths)
return reduce(operator.and_,
(c.contains_point(point[s], epsilon=epsilon)
for c, s in zip(self.cells, slices)),
True)
class UFCQuadrilateral(Cell):
"""This is the reference quadrilateral with vertices
(0.0, 0.0), (0.0, 1.0), (1.0, 0.0) and (1.0, 1.0)."""
def __init__(self):
product = TensorProductCell(UFCInterval(), UFCInterval())
pt = product.get_topology()
verts = product.get_vertices()
topology = flatten_entities(pt)
super(UFCQuadrilateral, self).__init__(QUADRILATERAL, verts, topology)
self.product = product
self.unflattening_map = compute_unflattening_map(pt)
def get_dimension(self):
"""Returns the subelement dimension of the cell. Same as the
spatial dimension."""
return self.get_spatial_dimension()
def construct_subelement(self, dimension):
"""Constructs the reference element of a cell subentity
specified by subelement dimension.
:arg dimension: subentity dimension (integer)
"""
if dimension == 2:
return self
elif dimension == 1:
return UFCInterval()
elif dimension == 0:
return Point()
else:
raise ValueError("Invalid dimension: %d" % (dimension,))
def get_entity_transform(self, dim, entity_i):
"""Returns a mapping of point coordinates from the
`entity_i`-th subentity of dimension `dim` to the cell.
:arg dim: entity dimension (integer)
:arg entity_i: entity number (integer)
"""
d, e = self.unflattening_map[(dim, entity_i)]
return self.product.get_entity_transform(d, e)
def volume(self):
"""Computes the volume in the appropriate dimensional measure."""
return self.product.volume()
def compute_reference_normal(self, facet_dim, facet_i):
"""Returns the unit normal in infinity norm to facet_i."""
assert facet_dim == 1
d, i = self.unflattening_map[(facet_dim, facet_i)]
return self.product.compute_reference_normal(d, i)
def contains_point(self, point, epsilon=0):
"""Checks if reference cell contains given point
(with numerical tolerance)."""
return self.product.contains_point(point, epsilon=epsilon)
class UFCHexahedron(Cell):
"""This is the reference hexahedron with vertices
(0.0, 0.0, 0.0), (0.0, 0.0, 1.0), (0.0, 1.0, 0.0), (0.0, 1.0, 1.0),
(1.0, 0.0, 0.0), (1.0, 0.0, 1.0), (1.0, 1.0, 0.0) and (1.0, 1.0, 1.0)."""
def __init__(self):
product = TensorProductCell(UFCInterval(), UFCInterval(), UFCInterval())
pt = product.get_topology()
verts = product.get_vertices()
topology = flatten_entities(pt)
super(UFCHexahedron, self).__init__(HEXAHEDRON, verts, topology)
self.product = product
self.unflattening_map = compute_unflattening_map(pt)
def get_dimension(self):
"""Returns the subelement dimension of the cell. Same as the
spatial dimension."""
return self.get_spatial_dimension()
def construct_subelement(self, dimension):
"""Constructs the reference element of a cell subentity
specified by subelement dimension.
:arg dimension: subentity dimension (integer)
"""
if dimension == 3:
return self
elif dimension == 2:
return UFCQuadrilateral()
elif dimension == 1:
return UFCInterval()
elif dimension == 0:
return Point()
else:
raise ValueError("Invalid dimension: %d" % (dimension,))
def get_entity_transform(self, dim, entity_i):
"""Returns a mapping of point coordinates from the
`entity_i`-th subentity of dimension `dim` to the cell.
:arg dim: entity dimension (integer)
:arg entity_i: entity number (integer)
"""
d, e = self.unflattening_map[(dim, entity_i)]
return self.product.get_entity_transform(d, e)
def volume(self):
"""Computes the volume in the appropriate dimensional measure."""
return self.product.volume()
def compute_reference_normal(self, facet_dim, facet_i):
"""Returns the unit normal in infinity norm to facet_i."""
assert facet_dim == 2
d, i = self.unflattening_map[(facet_dim, facet_i)]
return self.product.compute_reference_normal(d, i)
def contains_point(self, point, epsilon=0):
"""Checks if reference cell contains given point
(with numerical tolerance)."""
return self.product.contains_point(point, epsilon=epsilon)
def make_affine_mapping(xs, ys):
"""Constructs (A,b) such that x --> A * x + b is the affine
mapping from the simplex defined by xs to the simplex defined by ys."""
dim_x = len(xs[0])
dim_y = len(ys[0])
if len(xs) != len(ys):
raise Exception("")
# find A in R^{dim_y,dim_x}, b in R^{dim_y} such that
# A xs[i] + b = ys[i] for all i
mat = numpy.zeros((dim_x * dim_y + dim_y, dim_x * dim_y + dim_y), "d")
rhs = numpy.zeros((dim_x * dim_y + dim_y,), "d")
# loop over points
for i in range(len(xs)):
# loop over components of each A * point + b
for j in range(dim_y):
row_cur = i * dim_y + j
col_start = dim_x * j
col_finish = col_start + dim_x
mat[row_cur, col_start:col_finish] = numpy.array(xs[i])
rhs[row_cur] = ys[i][j]
# need to get terms related to b
mat[row_cur, dim_y * dim_x + j] = 1.0
sol = numpy.linalg.solve(mat, rhs)
A = numpy.reshape(sol[:dim_x * dim_y], (dim_y, dim_x))
b = sol[dim_x * dim_y:]
return A, b
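# Worked example: mapping the UFC triangle onto the default triangle is
# x -> 2x - 1 componentwise, so A == 2*I and b == (-1, -1):
#   xs = ((0., 0.), (1., 0.), (0., 1.))
#   ys = ((-1., -1.), (1., -1.), (-1., 1.))
#   A, b = make_affine_mapping(xs, ys)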
def default_simplex(spatial_dim):
"""Factory function that maps spatial dimension to an instance of
the default reference simplex of that dimension."""
if spatial_dim == 1:
return DefaultLine()
elif spatial_dim == 2:
return DefaultTriangle()
elif spatial_dim == 3:
return DefaultTetrahedron()
else:
raise RuntimeError("Can't create default simplex of dimension %s." % str(spatial_dim))
def ufc_simplex(spatial_dim):
"""Factory function that maps spatial dimension to an instance of
the UFC reference simplex of that dimension."""
if spatial_dim == 0:
return Point()
elif spatial_dim == 1:
return UFCInterval()
elif spatial_dim == 2:
return UFCTriangle()
elif spatial_dim == 3:
return UFCTetrahedron()
else:
raise RuntimeError("Can't create UFC simplex of dimension %s." % str(spatial_dim))
def ufc_cell(cell):
"""Handle incoming calls from FFC."""
# celltype could be a string or a cell.
if isinstance(cell, str):
celltype = cell
else:
celltype = cell.cellname()
if " * " in celltype:
# Tensor product cell
return TensorProductCell(*map(ufc_cell, celltype.split(" * ")))
elif celltype == "quadrilateral":
return UFCQuadrilateral()
elif celltype == "hexahedron":
return UFCHexahedron()
elif celltype == "vertex":
return ufc_simplex(0)
elif celltype == "interval":
return ufc_simplex(1)
elif celltype == "triangle":
return ufc_simplex(2)
elif celltype == "tetrahedron":
return ufc_simplex(3)
else:
raise RuntimeError("Don't know how to create UFC cell of type %s" % str(celltype))
def volume(verts):
"""Constructs the volume of the simplex spanned by verts"""
# use fact that volume of UFC reference element is 1/n!
sd = len(verts) - 1
ufcel = ufc_simplex(sd)
ufcverts = ufcel.get_vertices()
A, b = make_affine_mapping(ufcverts, verts)
# can't just take determinant since, e.g. the face of
# a tet being mapped to a 2d triangle doesn't have a
# square matrix
(u, s, vt) = numpy.linalg.svd(A)
# this is the determinant of the "square part" of the matrix
# (i.e. the part that maps the restriction of the higher-dimensional
# space to the UFC element)
p = numpy.prod([si for si in s if (si) > 1.e-10])
return p / factorial(sd)
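# Worked example: volume(((0., 0.), (1., 0.), (0., 1.))) returns 0.5.
# The SVD also handles embedded facets, e.g. a triangle specified by
# three 3D vertex coordinates.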
def tuple_sum(tree):
"""
Calculates the sum of the elements in a (possibly nested) tuple; this is
needed to handle the nested dimension tuples of TensorProductCell.
Example: tuple_sum(((1, 0), 1)) returns 2
If the input argument is not a tuple, it is returned unchanged.
"""
if isinstance(tree, tuple):
return sum(map(tuple_sum, tree))
else:
return tree
def is_hypercube(cell):
if isinstance(cell, (DefaultLine, UFCInterval, UFCQuadrilateral, UFCHexahedron)):
return True
elif isinstance(cell, TensorProductCell):
return reduce(lambda a, b: a and b, [is_hypercube(c) for c in cell.cells])
else:
return False
def flatten_reference_cube(ref_el):
"""This function flattens a Tensor Product hypercube to the corresponding UFC hypercube"""
flattened_cube = {2: UFCQuadrilateral(), 3: UFCHexahedron()}
if numpy.sum(ref_el.get_dimension()) <= 1:
# Just return point/interval cell arguments
return ref_el
else:
# Handle cases where cell is a quad/cube constructed from a tensor product or
# an already flattened element
if is_hypercube(ref_el):
return flattened_cube[numpy.sum(ref_el.get_dimension())]
else:
raise TypeError('Invalid cell type')
def flatten_entities(topology_dict):
"""This function flattens topology dict of TensorProductCell and entity_dofs dict of TensorProductElement"""
flattened_entities = defaultdict(list)
for dim in sorted(topology_dict.keys()):
flat_dim = tuple_sum(dim)
flattened_entities[flat_dim] += [v for k, v in sorted(topology_dict[dim].items())]
return {dim: dict(enumerate(entities))
for dim, entities in flattened_entities.items()}
def compute_unflattening_map(topology_dict):
"""This function returns unflattening map for the given tensor product topology dict."""
counter = defaultdict(count)
unflattening_map = {}
for dim, entities in sorted(topology_dict.items()):
flat_dim = tuple_sum(dim)
for entity in entities:
flat_entity = next(counter[flat_dim])
unflattening_map[(flat_dim, flat_entity)] = (dim, entity)
return unflattening_map
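# Hedged sanity check of the flattening helpers (illustrative, executed
# only when run as a script): on the quadrilateral, the four product edge
# entities of dimensions (0, 1) and (1, 0) all flatten to dimension 1.
if __name__ == "__main__":
    pt = TensorProductCell(UFCInterval(), UFCInterval()).get_topology()
    flat = flatten_entities(pt)
    assert sorted(flat.keys()) == [0, 1, 2] and len(flat[1]) == 4
    unflat = compute_unflattening_map(pt)
    assert unflat[(1, 0)] == ((0, 1), 0)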
fiat-2019.2.0~git20210419.7d418fa/FIAT/regge.py 0000664 0000000 0000000 00000007234 14135323752 0017673 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
"""Implementation of the generalized Regge finite elements."""
# Copyright (C) 2015-2018 Lizao Li
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from FIAT.finite_element import CiarletElement
from FIAT.dual_set import DualSet
from FIAT.polynomial_set import ONSymTensorPolynomialSet
from FIAT.functional import PointwiseInnerProductEvaluation as InnerProduct
class ReggeDual(DualSet):
"""Degrees of freedom for generalized Regge finite elements."""
def __init__(self, cell, degree):
dim = cell.get_spatial_dimension()
if (dim < 2) or (dim > 3):
raise ValueError("Generalized Regge elements are implemented only "
"for dimension 2--3. For 1D, it is just DG(r).")
# construct the degrees of freedoms
dofs = [] # list of functionals
# dof_ids[i][j] contains the indices of dofs that are associated with
# entity j in dim i
dof_ids = {}
# no vertex dof
dof_ids[0] = {i: [] for i in range(dim + 1)}
# edge dofs
(_dofs, _dof_ids) = self._generate_dofs(cell, 1, degree, 0)
dofs.extend(_dofs)
dof_ids[1] = _dof_ids
# facet dofs for 3D
if dim == 3:
(_dofs, _dof_ids) = self._generate_dofs(cell, 2, degree, len(dofs))
dofs.extend(_dofs)
dof_ids[2] = _dof_ids
# cell dofs
(_dofs, _dof_ids) = self._generate_dofs(cell, dim, degree, len(dofs))
dofs.extend(_dofs)
dof_ids[dim] = _dof_ids
super(ReggeDual, self).__init__(dofs, cell, dof_ids)
@staticmethod
def _generate_dofs(cell, entity_dim, degree, offset):
"""Generate degrees of freedom for enetities of dimension entity_dim
Input: all obvious except
offset -- the current first available dof id.
Output:
dofs -- an array of dofs associated to entities in that dim
dof_ids -- a dict mapping entity_id to the range of indices of dofs
associated to it.
On a k-face for degree r, the dofs are given by the value of
t^T u t
evaluated at enough points to control P(r-k+1) for all the edge
tangents of the face.
`cell.make_points(entity_dim, entity_id, degree + 2)` happens to
generate exactly those points needed.
"""
dofs = []
dof_ids = {}
num_entities = len(cell.get_topology()[entity_dim])
for entity_id in range(num_entities):
pts = cell.make_points(entity_dim, entity_id, degree + 2)
tangents = cell.compute_face_edge_tangents(entity_dim, entity_id)
dofs += [InnerProduct(cell, t, t, pt)
for pt in pts
for t in tangents]
num_new_dofs = len(pts) * len(tangents)
dof_ids[entity_id] = list(range(offset, offset + num_new_dofs))
offset += num_new_dofs
return (dofs, dof_ids)
class Regge(CiarletElement):
"""The generalized Regge elements for symmetric-matrix-valued functions.
REG(r) in dimension n is the space of polynomial symmetric-matrix-valued
functions of degree r or less with tangential-tangential continuity.
"""
def __init__(self, cell, degree):
assert degree >= 0, "Regge start at degree 0!"
# shape functions
Ps = ONSymTensorPolynomialSet(cell, degree)
# degrees of freedom
Ls = ReggeDual(cell, degree)
# mapping under affine transformation
mapping = "double covariant piola"
super(Regge, self).__init__(Ps, Ls, degree, mapping=mapping)
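# Hedged usage sketch (illustrative): the lowest-order element on a
# triangle has one tangential-tangential dof per edge.
if __name__ == "__main__":
    from FIAT.reference_element import ufc_simplex
    R0 = Regge(ufc_simplex(2), 0)
    assert R0.space_dimension() == 3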
fiat-2019.2.0~git20210419.7d418fa/FIAT/restricted.py 0000664 0000000 0000000 00000007432 14135323752 0020752 0 ustar 00root root 0000000 0000000 # Copyright (C) 2015-2016 Jan Blechta, Andrew T T McRae, and others
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from FIAT.dual_set import DualSet
from FIAT.finite_element import CiarletElement
class RestrictedElement(CiarletElement):
"""Restrict given element to specified list of dofs."""
def __init__(self, element, indices=None, restriction_domain=None):
'''If both are given, indices takes precedence over restriction_domain.'''
if not (indices or restriction_domain):
raise RuntimeError("Either indices or restriction_domain must be passed in")
if not indices:
indices = _get_indices(element, restriction_domain)
if isinstance(indices, str):
raise RuntimeError("variable 'indices' was a string; did you forget to use a keyword?")
if len(indices) == 0:
raise ValueError("No point in creating empty RestrictedElement.")
self._element = element
self._indices = indices
# Fetch reference element
ref_el = element.get_reference_element()
# Restrict primal set
poly_set = element.get_nodal_basis().take(indices)
# Restrict dual set
dof_counter = 0
entity_ids = {}
nodes = []
nodes_old = element.dual_basis()
for d, entities in element.entity_dofs().items():
entity_ids[d] = {}
for entity, dofs in entities.items():
entity_ids[d][entity] = []
for dof in dofs:
if dof not in indices:
continue
entity_ids[d][entity].append(dof_counter)
dof_counter += 1
nodes.append(nodes_old[dof])
assert dof_counter == len(indices)
dual = DualSet(nodes, ref_el, entity_ids)
# Restrict mapping
mapping_old = element.mapping()
mapping_new = [mapping_old[dof] for dof in indices]
assert all(e_mapping == mapping_new[0] for e_mapping in mapping_new)
# Call constructor of CiarletElement
super(RestrictedElement, self).__init__(poly_set, dual, 0, element.get_formdegree(), mapping_new[0])
def sorted_by_key(mapping):
"Sort dict items by key, allowing different key types."
# Python 3 doesn't allow comparing built-ins of different types, hence the typename trick here
def _key(x):
return (type(x[0]).__name__, x[0])
return sorted(mapping.items(), key=_key)
def _get_indices(element, restriction_domain):
"Restriction domain can be 'interior', 'vertex', 'edge', 'face' or 'facet'"
if restriction_domain == "interior":
# Return dofs from interior
return element.entity_dofs()[max(element.entity_dofs().keys())][0]
# otherwise return dofs with d <= dim
if restriction_domain == "vertex":
dim = 0
elif restriction_domain == "edge":
dim = 1
elif restriction_domain == "face":
dim = 2
elif restriction_domain == "facet":
dim = element.get_reference_element().get_spatial_dimension() - 1
else:
raise RuntimeError("Invalid restriction domain")
is_prodcell = isinstance(max(element.entity_dofs().keys()), tuple)
entity_dofs = element.entity_dofs()
indices = []
for d in range(dim + 1):
if is_prodcell:
for a in range(d + 1):
b = d - a
try:
entities = entity_dofs[(a, b)]
for (entity, index) in sorted_by_key(entities):
indices += index
except KeyError:
pass
else:
entities = entity_dofs[d]
for (entity, index) in sorted_by_key(entities):
indices += index
return indices
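# Hedged usage sketch (illustrative): restricting cubic Lagrange on a
# triangle to its interior leaves exactly the single bubble dof.
if __name__ == "__main__":
    from FIAT.lagrange import Lagrange
    from FIAT.reference_element import ufc_simplex
    P3 = Lagrange(ufc_simplex(2), 3)
    bubble = RestrictedElement(P3, restriction_domain="interior")
    assert bubble.space_dimension() == 1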
fiat-2019.2.0~git20210419.7d418fa/FIAT/serendipity.py 0000664 0000000 0000000 00000026302 14135323752 0021136 0 ustar 00root root 0000000 0000000 # Copyright (C) 2019 Cyrus Cheng (Imperial College London)
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
#
# Modified by David A. Ham (david.ham@imperial.ac.uk), 2019
from sympy import symbols, legendre, Array, diff, lambdify
import numpy as np
from FIAT.finite_element import FiniteElement
from FIAT.lagrange import Lagrange
from FIAT.dual_set import make_entity_closure_ids
from FIAT.polynomial_set import mis
from FIAT.reference_element import (compute_unflattening_map,
flatten_reference_cube)
from FIAT.reference_element import make_lattice
from FIAT.pointwise_dual import compute_pointwise_dual
x, y, z = symbols('x y z')
variables = (x, y, z)
leg = legendre
def tr(n):
if n <= 1:
return 0
else:
return int((n-3)*(n-2)/2)
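# tr(n) counts the interior ("face") modes of the degree-n serendipity
# space on a quadrilateral face: e.g. tr(4) == 1 and tr(6) == 6.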
class Serendipity(FiniteElement):
def __new__(cls, ref_el, degree):
dim = ref_el.get_spatial_dimension()
if dim == 1:
return Lagrange(ref_el, degree)
elif dim == 0:
raise IndexError("reference element cannot be dimension 0")
else:
self = super().__new__(cls)
return self
def __init__(self, ref_el, degree):
flat_el = flatten_reference_cube(ref_el)
dim = flat_el.get_spatial_dimension()
flat_topology = flat_el.get_topology()
verts = flat_el.get_vertices()
dx = ((verts[-1][0] - x)/(verts[-1][0] - verts[0][0]), (x - verts[0][0])/(verts[-1][0] - verts[0][0]))
dy = ((verts[-1][1] - y)/(verts[-1][1] - verts[0][1]), (y - verts[0][1])/(verts[-1][1] - verts[0][1]))
x_mid = 2*x-(verts[-1][0] + verts[0][0])
y_mid = 2*y-(verts[-1][1] + verts[0][1])
try:
dz = ((verts[-1][2] - z)/(verts[-1][2] - verts[0][2]), (z - verts[0][2])/(verts[-1][2] - verts[0][2]))
z_mid = 2*z-(verts[-1][2] + verts[0][2])
except IndexError:
dz = None
z_mid = None
VL = v_lambda_0(dim, dx, dy, dz)
EL = []
FL = []
IL = []
s_list = []
entity_ids = {}
cur = 0
for top_dim, entities in flat_topology.items():
entity_ids[top_dim] = {}
for entity in entities:
entity_ids[top_dim][entity] = []
for j in sorted(flat_topology[0]):
entity_ids[0][j] = [cur]
cur = cur + 1
EL += e_lambda_0(degree, dim, dx, dy, dz, x_mid, y_mid, z_mid)
for j in sorted(flat_topology[1]):
entity_ids[1][j] = list(range(cur, cur + degree - 1))
cur = cur + degree - 1
FL += f_lambda_0(degree, dim, dx, dy, dz, x_mid, y_mid, z_mid)
for j in sorted(flat_topology[2]):
entity_ids[2][j] = list(range(cur, cur + tr(degree)))
cur = cur + tr(degree)
if dim == 3:
IL += i_lambda_0(degree, dx, dy, dz, x_mid, y_mid, z_mid)
entity_ids[3] = {}
entity_ids[3][0] = list(range(cur, cur + len(IL)))
cur = cur + len(IL)
s_list = VL + EL + FL + IL
assert len(s_list) == cur
formdegree = 0
super(Serendipity, self).__init__(ref_el=ref_el, dual=None, order=degree, formdegree=formdegree)
self.basis = {(0,)*dim: Array(s_list)}
self.basis_callable = {(0,)*dim: lambdify(variables[:dim], Array(s_list),
modules="numpy", dummify=True)}
topology = ref_el.get_topology()
unflattening_map = compute_unflattening_map(topology)
unflattened_entity_ids = {}
unflattened_entity_closure_ids = {}
entity_closure_ids = make_entity_closure_ids(flat_el, entity_ids)
for dim, entities in sorted(topology.items()):
unflattened_entity_ids[dim] = {}
unflattened_entity_closure_ids[dim] = {}
for dim, entities in sorted(flat_topology.items()):
for entity in entities:
unflat_dim, unflat_entity = unflattening_map[(dim, entity)]
unflattened_entity_ids[unflat_dim][unflat_entity] = entity_ids[dim][entity]
unflattened_entity_closure_ids[unflat_dim][unflat_entity] = entity_closure_ids[dim][entity]
self.entity_ids = unflattened_entity_ids
self.entity_closure_ids = unflattened_entity_closure_ids
self._degree = degree
self.flat_el = flat_el
self.dual = compute_pointwise_dual(self, unisolvent_pts(ref_el, degree))
def degree(self):
return self._degree + 1
def get_nodal_basis(self):
raise NotImplementedError("get_nodal_basis not implemented for serendipity")
def get_dual_set(self):
raise NotImplementedError("get_dual_set is not implemented for serendipity")
def get_coeffs(self):
raise NotImplementedError("get_coeffs not implemented for serendipity")
def tabulate(self, order, points, entity=None):
if entity is None:
entity = (self.ref_el.get_dimension(), 0)
entity_dim, entity_id = entity
transform = self.ref_el.get_entity_transform(entity_dim, entity_id)
points = list(map(transform, points))
phivals = {}
dim = self.flat_el.get_spatial_dimension()
if dim <= 1:
raise NotImplementedError('no tabulate method for serendipity elements of dimension 1 or less.')
if dim >= 4:
raise NotImplementedError('tabulate does not support dimensions higher than 3.')
points = np.asarray(points)
npoints, pointdim = points.shape
for o in range(order + 1):
alphas = mis(dim, o)
for alpha in alphas:
try:
callable = self.basis_callable[alpha]
except KeyError:
polynomials = diff(self.basis[(0,)*dim], *zip(variables, alpha))
callable = lambdify(variables[:dim], polynomials, modules="numpy", dummify=True)
self.basis[alpha] = polynomials
self.basis_callable[alpha] = callable
tabulation = callable(*(points[:, i] for i in range(pointdim)))
T = np.asarray([np.broadcast_to(tab, (npoints, ))
for tab in tabulation])
phivals[alpha] = T
return phivals
def entity_dofs(self):
"""Return the map of topological entities to degrees of
freedom for the finite element."""
return self.entity_ids
def entity_closure_dofs(self):
"""Return the map of topological entities to degrees of
freedom on the closure of those entities for the finite element."""
return self.entity_closure_ids
def value_shape(self):
return ()
def dmats(self):
raise NotImplementedError
def get_num_members(self, arg):
raise NotImplementedError
def space_dimension(self):
return len(self.basis[(0,)*self.flat_el.get_spatial_dimension()])
def v_lambda_0(dim, dx, dy, dz):
if dim == 2:
VL = [a*b for a in dx for b in dy]
else:
VL = [a*b*c for a in dx for b in dy for c in dz]
return VL
def e_lambda_0(i, dim, dx, dy, dz, x_mid, y_mid, z_mid):
if dim == 2:
EL = tuple([-leg(j, y_mid) * dy[0] * dy[1] * a for a in dx for j in range(i-1)] +
[-leg(j, x_mid) * dx[0] * dx[1] * b for b in dy for j in range(i-1)])
else:
EL = tuple([-leg(j, z_mid) * dz[0] * dz[1] * a * b for b in dx for a in dy for j in range(i-1)] +
[-leg(j, y_mid) * dy[0] * dy[1] * a * c for a in dx for c in dz for j in range(i-1)] +
[-leg(j, x_mid) * dx[0] * dx[1] * b * c for c in dy for b in dz for j in range(i-1)])
return EL
def f_lambda_0(i, dim, dx, dy, dz, x_mid, y_mid, z_mid):
if dim == 2:
FL = tuple([leg(j, x_mid) * leg(k-4-j, y_mid) * dx[0] * dx[1] * dy[0] * dy[1]
for k in range(4, i + 1) for j in range(k-3)])
else:
FL = tuple([leg(j, y_mid) * leg(k-4-j, z_mid) * dy[0] * dy[1] * dz[0] * dz[1] * a
for a in dx for k in range(4, i + 1) for j in range(k-3)] +
[leg(j, z_mid) * leg(k-4-j, x_mid) * dx[0] * dx[1] * dz[0] * dz[1] * b
for b in dy for k in range(4, i + 1) for j in range(k-3)] +
[leg(j, x_mid) * leg(k-4-j, y_mid) * dx[0] * dx[1] * dy[0] * dy[1] * c
for c in dz for k in range(4, i + 1) for j in range(k-3)])
return FL
def i_lambda_0(i, dx, dy, dz, x_mid, y_mid, z_mid):
IL = tuple([-leg(l-6-j, x_mid) * leg(j-k, y_mid) * leg(k, z_mid) *
dx[0] * dx[1] * dy[0] * dy[1] * dz[0] * dz[1]
for l in range(6, i + 1) for j in range(l-5) for k in range(j+1)])
return IL
def unisolvent_pts(K, deg):
flat_el = flatten_reference_cube(K)
dim = flat_el.get_spatial_dimension()
if dim == 2:
return unisolvent_pts_quad(flat_el, deg)
elif dim == 3:
return unisolvent_pts_hex(flat_el, deg)
else:
raise ValueError("Serendipity only defined for quads and hexes")
def unisolvent_pts_quad(K, deg):
"""Gives a set of unisolvent points for the quad serendipity space of order deg.
The S element is not dual to these nodes, but a dual basis can be constructed from them."""
L = K.construct_subelement(1)
vs = np.asarray(K.vertices)
pts = [pt for pt in K.vertices]
Lpts = make_lattice(L.vertices, deg, 1)
for e in K.topology[1]:
Fmap = K.get_entity_transform(1, e)
epts = [tuple(Fmap(pt)) for pt in Lpts]
pts.extend(epts)
if deg > 3:
dx0 = (vs[1, :] - vs[0, :]) / (deg-2)
dx1 = (vs[2, :] - vs[0, :]) / (deg-2)
internal_nodes = [tuple(vs[0, :] + dx0 * i + dx1 * j)
for i in range(1, deg-2)
for j in range(1, deg-1-i)]
pts.extend(internal_nodes)
return pts
def unisolvent_pts_hex(K, deg):
"""Gives a set of unisolvent points for the hex serendipity space of order deg.
The S element is not dual to these nodes, but a dual basis can be constructed from them."""
L = K.construct_subelement(1)
F = K.construct_subelement(2)
vs = np.asarray(K.vertices)
pts = [pt for pt in K.vertices]
Lpts = make_lattice(L.vertices, deg, 1)
for e in K.topology[1]:
Fmap = K.get_entity_transform(1, e)
epts = [tuple(Fmap(pt)) for pt in Lpts]
pts.extend(epts)
if deg > 3:
fvs = np.asarray(F.vertices)
# Planar points to map to each face
dx0 = (fvs[1, :] - fvs[0, :]) / (deg-2)
dx1 = (fvs[2, :] - fvs[0, :]) / (deg-2)
Fpts = [tuple(fvs[0, :] + dx0 * i + dx1 * j)
for i in range(1, deg-2)
for j in range(1, deg-1-i)]
for f in K.topology[2]:
Fmap = K.get_entity_transform(2, f)
pts.extend([tuple(Fmap(pt)) for pt in Fpts])
if deg > 5:
dx0 = np.asarray([1., 0, 0]) / (deg-4)
dx1 = np.asarray([0, 1., 0]) / (deg-4)
dx2 = np.asarray([0, 0, 1.]) / (deg-4)
Ipts = [tuple(vs[0, :] + dx0 * i + dx1 * j + dx2 * k)
for i in range(1, deg-4)
for j in range(1, deg-3-i)
for k in range(1, deg-2-i-j)]
pts.extend(Ipts)
return pts
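# Hedged sanity check (illustrative): the degree-2 serendipity quad space
# has 8 members (4 vertex + 4 edge functions), matching the point count.
if __name__ == "__main__":
    from FIAT.reference_element import UFCQuadrilateral
    S2 = Serendipity(UFCQuadrilateral(), 2)
    assert S2.space_dimension() == 8
    assert len(unisolvent_pts(UFCQuadrilateral(), 2)) == 8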
fiat-2019.2.0~git20210419.7d418fa/FIAT/tensor_product.py 0000664 0000000 0000000 00000051505 14135323752 0021654 0 ustar 00root root 0000000 0000000 # Copyright (C) 2008 Robert C. Kirby (Texas Tech University)
# Copyright (C) 2013 Andrew T. T. McRae
# Modified by Thomas H. Gibson, 2016
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
import numpy
from FIAT.finite_element import FiniteElement
from FIAT.reference_element import TensorProductCell, UFCQuadrilateral, UFCHexahedron, flatten_entities, compute_unflattening_map
from FIAT.dual_set import DualSet
from FIAT.polynomial_set import mis
from FIAT import dual_set
from FIAT import functional
def _first_point(node):
return tuple(node.get_point_dict().keys())[0]
def _first_point_pair(node):
return tuple(node.get_point_dict().items())[0]
class TensorProductElement(FiniteElement):
"""Class implementing a finite element that is the tensor product
of two existing finite elements."""
def __init__(self, A, B):
# set up simple things
order = min(A.get_order(), B.get_order())
if A.get_formdegree() is None or B.get_formdegree() is None:
formdegree = None
else:
formdegree = A.get_formdegree() + B.get_formdegree()
# set up reference element
ref_el = TensorProductCell(A.get_reference_element(),
B.get_reference_element())
if A.mapping()[0] != "affine" and B.mapping()[0] == "affine":
mapping = A.mapping()[0]
elif B.mapping()[0] != "affine" and A.mapping()[0] == "affine":
mapping = B.mapping()[0]
elif A.mapping()[0] == "affine" and B.mapping()[0] == "affine":
mapping = "affine"
else:
raise ValueError("check tensor product mappings - at least one must be affine")
# set up entity_ids
Adofs = A.entity_dofs()
Bdofs = B.entity_dofs()
Bsdim = B.space_dimension()
entity_ids = {}
for curAdim in Adofs:
for curBdim in Bdofs:
entity_ids[(curAdim, curBdim)] = {}
dim_cur = 0
for entityA in Adofs[curAdim]:
for entityB in Bdofs[curBdim]:
entity_ids[(curAdim, curBdim)][dim_cur] = \
[x*Bsdim + y for x in Adofs[curAdim][entityA]
for y in Bdofs[curBdim][entityB]]
dim_cur += 1
# set up dual basis
Anodes = A.dual_basis()
Bnodes = B.dual_basis()
# build the dual set by inspecting the current dual
# sets item by item.
# Currently supported cases:
# PointEval x PointEval = PointEval [scalar x scalar = scalar]
# PointScaledNormalEval x PointEval = PointScaledNormalEval [vector x scalar = vector]
# ComponentPointEvaluation x PointEval [vector x scalar = vector]
nodes = []
for Anode in Anodes:
if isinstance(Anode, functional.PointEvaluation):
for Bnode in Bnodes:
if isinstance(Bnode, functional.PointEvaluation):
# case: PointEval x PointEval
# the PointEval functional just requires the
# coordinates. these are currently stored as
# the key of a one-item dictionary. we retrieve
# these by calling get_point_dict(), and
# use the concatenation to make a new PointEval
nodes.append(functional.PointEvaluation(ref_el, _first_point(Anode) + _first_point(Bnode)))
elif isinstance(Bnode, functional.IntegralMoment):
# dummy functional for product with integral moments
nodes.append(functional.Functional(None, None, None,
{}, "Undefined"))
elif isinstance(Bnode, functional.PointDerivative):
# dummy functional for product with point derivative
nodes.append(functional.Functional(None, None, None,
{}, "Undefined"))
else:
raise NotImplementedError("unsupported functional type")
elif isinstance(Anode, functional.PointScaledNormalEvaluation):
for Bnode in Bnodes:
if isinstance(Bnode, functional.PointEvaluation):
# case: PointScaledNormalEval x PointEval
# this could be wrong if the second shape
# has spatial dimension >1, since we are not
# explicitly scaling by facet size
if len(_first_point(Bnode)) > 1:
# TODO: support this case one day
raise NotImplementedError("PointScaledNormalEval x PointEval is not yet supported if the second shape has dimension > 1")
# We cannot make a new functional.PSNEval in
# the natural way, since it tries to compute
# the normal vector by itself.
# Instead, we create things manually, and
# call Functional() with these arguments
sd = ref_el.get_spatial_dimension()
# The pt_dict is a one-item dictionary containing
# the details of the functional.
# The key is the spatial coordinate, which
# is just a concatenation of the two parts.
# The value is a list of tuples, representing
# the normal vector (scaled by the volume of
# the facet) at that point.
# Each tuple looks like (foo, (i,)); the i'th
# component of the scaled normal is foo.
# The following line is only valid when the second
# shape has spatial dimension 1 (enforced above)
Apoint, Avalue = _first_point_pair(Anode)
pt_dict = {Apoint + _first_point(Bnode): Avalue + [(0.0, (len(Apoint),))]}
# The following line should be used in the
# general case
# pt_dict = {Anode.get_point_dict().keys()[0] + Bnode.get_point_dict().keys()[0]: Anode.get_point_dict().values()[0] + [(0.0, (ii,)) for ii in range(len(Anode.get_point_dict().keys()[0]), len(Anode.get_point_dict().keys()[0]) + len(Bnode.get_point_dict().keys()[0]))]}
# THE FOLLOWING IS PROBABLY CORRECT BUT UNTESTED
shp = (sd,)
nodes.append(functional.Functional(ref_el, shp, pt_dict, {}, "PointScaledNormalEval"))
else:
raise NotImplementedError("unsupported functional type")
elif isinstance(Anode, functional.PointEdgeTangentEvaluation):
for Bnode in Bnodes:
if isinstance(Bnode, functional.PointEvaluation):
# case: PointEdgeTangentEval x PointEval
# this is very similar to the case above, so comments omitted
if len(_first_point(Bnode)) > 1:
raise NotImplementedError("PointEdgeTangentEval x PointEval is not yet supported if the second shape has dimension > 1")
sd = ref_el.get_spatial_dimension()
Apoint, Avalue = _first_point_pair(Anode)
pt_dict = {Apoint + _first_point(Bnode): Avalue + [(0.0, (len(Apoint),))]}
# THE FOLLOWING IS PROBABLY CORRECT BUT UNTESTED
shp = (sd,)
nodes.append(functional.Functional(ref_el, shp, pt_dict, {}, "PointEdgeTangent"))
else:
raise NotImplementedError("unsupported functional type")
elif isinstance(Anode, functional.ComponentPointEvaluation):
for Bnode in Bnodes:
if isinstance(Bnode, functional.PointEvaluation):
# case: ComponentPointEval x PointEval
# the CptPointEval functional requires the component
# and the coordinates. very similar to PE x PE case.
sd = ref_el.get_spatial_dimension()
nodes.append(functional.ComponentPointEvaluation(ref_el, Anode.comp, (sd,), _first_point(Anode) + _first_point(Bnode)))
else:
raise NotImplementedError("unsupported functional type")
elif isinstance(Anode, functional.FrobeniusIntegralMoment):
for Bnode in Bnodes:
if isinstance(Bnode, functional.PointEvaluation):
# case: FroIntMom x PointEval
sd = ref_el.get_spatial_dimension()
pt_dict = {}
pt_old = Anode.get_point_dict()
for pt in pt_old:
pt_dict[pt+_first_point(Bnode)] = pt_old[pt] + [(0.0, (sd-1,))]
# THE FOLLOWING IS PROBABLY CORRECT BUT UNTESTED
shp = (sd,)
nodes.append(functional.Functional(ref_el, shp, pt_dict, {}, "FrobeniusIntegralMoment"))
else:
raise NotImplementedError("unsupported functional type")
elif isinstance(Anode, functional.IntegralMoment):
for Bnode in Bnodes:
if isinstance(Bnode, functional.PointEvaluation):
# case: IntMom x PointEval
sd = ref_el.get_spatial_dimension()
pt_dict = {}
pt_old = Anode.get_point_dict()
for pt in pt_old:
pt_dict[pt+_first_point(Bnode)] = pt_old[pt]
# THE FOLLOWING IS PROBABLY CORRECT BUT UNTESTED
shp = (sd,)
nodes.append(functional.Functional(ref_el, shp, pt_dict, {}, "IntegralMoment"))
else:
raise NotImplementedError("unsupported functional type")
elif isinstance(Anode, functional.Functional):
# this should catch everything else
for Bnode in Bnodes:
nodes.append(functional.Functional(None, None, None, {}, "Undefined"))
else:
raise NotImplementedError("unsupported functional type")
dual = dual_set.DualSet(nodes, ref_el, entity_ids)
super(TensorProductElement, self).__init__(ref_el, dual, order, formdegree, mapping)
# Set up constituent elements
self.A = A
self.B = B
# degree for quadrature rule
self.polydegree = max(A.degree(), B.degree())
def degree(self):
"""Return the degree of the (embedding) polynomial space."""
return self.polydegree
def get_nodal_basis(self):
"""Return the nodal basis, encoded as a PolynomialSet object,
for the finite element."""
raise NotImplementedError("get_nodal_basis not implemented")
def get_coeffs(self):
"""Return the expansion coefficients for the basis of the
finite element."""
raise NotImplementedError("get_coeffs not implemented")
def tabulate(self, order, points, entity=None):
"""Return tabulated values of derivatives up to given order of
basis functions at given points."""
if entity is None:
entity = (self.ref_el.get_dimension(), 0)
entity_dim, entity_id = entity
shape = tuple(len(c.get_topology()[d])
for c, d in zip(self.ref_el.cells, entity_dim))
idA, idB = numpy.unravel_index(entity_id, shape)
# Factor the entity argument to get entities of the component elements
entityA_dim, entityB_dim = entity_dim
entityA = (entityA_dim, idA)
entityB = (entityB_dim, idB)
pointsAdim, pointsBdim = [c.get_spatial_dimension()
for c in self.ref_el.construct_subelement(entity_dim).cells]
pointsA = [point[:pointsAdim] for point in points]
pointsB = [point[pointsAdim:pointsAdim + pointsBdim] for point in points]
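# Example (a sketch): for an interval x interval cell with
# entity_dim == (1, 1), a point (x, y) is split into
# pointsA = [(x,)] for A and pointsB = [(y,)] for B.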
Asdim = self.A.ref_el.get_spatial_dimension()
Bsdim = self.B.ref_el.get_spatial_dimension()
# Note that for entities other than cells, the following
# tabulations are already appropriately zero-padded so no
# additional zero padding is required.
Atab = self.A.tabulate(order, pointsA, entityA)
Btab = self.B.tabulate(order, pointsB, entityB)
npoints = len(points)
# allow 2 scalar-valued FE spaces, or 1 scalar-valued,
# 1 vector-valued. Combining 2 vector-valued spaces
# into a tensor-valued space via an outer-product
# seems to be a sensible general option, but I don't
# know how to handle the nestedness of the arrays
# if someone then tries to make a new "tensor finite
# element" where one component is already a
# tensor-valued space!
A_valuedim = len(self.A.value_shape()) # scalar: 0, vector: 1
B_valuedim = len(self.B.value_shape()) # scalar: 0, vector: 1
if A_valuedim + B_valuedim > 1:
raise NotImplementedError("tabulate does not support two vector-valued inputs")
result = {}
for i in range(order + 1):
alphas = mis(Asdim+Bsdim, i) # thanks, Rob!
for alpha in alphas:
if A_valuedim == 0 and B_valuedim == 0:
# for each point, get outer product of (A's basis
# functions f1, f2, ... evaluated at that point)
# with (B's basis functions g1, g2, ... evaluated
# at that point). This gives temp[point][f_i][g_j].
# Flatten this, so bfs are
# in the order f1g1, f1g2, ..., f2g1, f2g2, ...
# which is compatible with the entity_dofs order.
# We now have temp[point][full basis function]
# Transpose this to get temp[bf][point],
# and we are done.
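# Shape bookkeeping (a sketch): with nA basis functions in A, nB
# in B and npoints points, each outer product is (nA, nB), the
# ravel gives nA*nB, so temp has shape (npoints, nA*nB) and
# result[alpha] has shape (nA*nB, npoints) after the transpose.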
temp = numpy.array([numpy.outer(
Atab[alpha[0:Asdim]][..., j],
Btab[alpha[Asdim:Asdim+Bsdim]][..., j])
.ravel() for j in range(npoints)])
result[alpha] = temp.transpose()
elif A_valuedim == 1 and B_valuedim == 0:
# similar to above, except A's basis functions
# are now vector-valued. numpy.outer flattens the
# array, so it's like taking the OP of
# f1_x, f1_y, f2_x, f2_y, ... with g1, g2, ...
# this gives us
# temp[point][f1x, f1y, f2x, f2y, ...][g_j].
# reshape once to get temp[point][f_i][x/y][g_j]
# transpose to get temp[point][x/y][f_i][g_j]
# reshape to flatten the last two indices, this
# gives us temp[point][x/y][full bf_i]
# finally, transpose the first and last indices
# to get temp[bf_i][x/y][point], and we are done.
temp = numpy.array([numpy.outer(
Atab[alpha[0:Asdim]][..., j],
Btab[alpha[Asdim:Asdim+Bsdim]][..., j])
for j in range(npoints)])
assert temp.shape[1] % 2 == 0
temp2 = temp.reshape((temp.shape[0],
temp.shape[1]//2,
2,
temp.shape[2]))\
.transpose(0, 2, 1, 3)\
.reshape((temp.shape[0], 2, -1))\
.transpose(2, 1, 0)
result[alpha] = temp2
elif A_valuedim == 0 and B_valuedim == 1:
# as above, with B's functions now vector-valued.
# we now do... [numpy.outer ... for ...] gives
# temp[point][f_i][g1x,g1y,g2x,g2y,...].
# reshape to temp[point][f_i][g_j][x/y]
# flatten middle: temp[point][full bf_i][x/y]
# transpose to temp[bf_i][x/y][point]
temp = numpy.array([numpy.outer(
Atab[alpha[0:Asdim]][..., j],
Btab[alpha[Asdim:Asdim+Bsdim]][..., j])
for j in range(len(Atab[alpha[0:Asdim]][0]))])
assert temp.shape[2] % 2 == 0
temp2 = temp.reshape((temp.shape[0], temp.shape[1],
temp.shape[2]//2, 2))\
.reshape((temp.shape[0], -1, 2))\
.transpose(1, 2, 0)
result[alpha] = temp2
return result
def value_shape(self):
"""Return the value shape of the finite element functions."""
if len(self.A.value_shape()) == 0 and len(self.B.value_shape()) == 0:
return ()
elif len(self.A.value_shape()) == 1 and len(self.B.value_shape()) == 0:
return (self.A.value_shape()[0],)
elif len(self.A.value_shape()) == 0 and len(self.B.value_shape()) == 1:
return (self.B.value_shape()[0],)
else:
raise NotImplementedError("value_shape not implemented")
def dmats(self):
"""Return dmats: expansion coefficients for basis function
derivatives."""
raise NotImplementedError("dmats not implemented")
def get_num_members(self, arg):
"""Return number of members of the expansion set."""
raise NotImplementedError("get_num_members not implemented")
def is_nodal(self):
# This element is nodal iff all factor elements are nodal.
return all([self.A.is_nodal(), self.B.is_nodal()])
class FlattenedDimensions(FiniteElement):
"""A wrapper class that flattens entity dimensions of a FIAT element defined
on a TensorProductCell to one with quadrilateral/hexahedron entities.
TensorProductCell has dimension defined as a tuple of factor element dimensions
(i, j) in 2D and (i, j, k) in 3D.
The flattened dimension is the sum of the tuple elements."""
def __init__(self, element):
nodes = element.dual.nodes
dim = element.ref_el.get_spatial_dimension()
if dim == 2:
ref_el = UFCQuadrilateral()
elif dim == 3:
ref_el = UFCHexahedron()
else:
raise ValueError("Illegal element dimension %s" % dim)
entity_ids = element.dual.entity_ids
flat_entity_ids = flatten_entities(entity_ids)
dual = DualSet(nodes, ref_el, flat_entity_ids)
super(FlattenedDimensions, self).__init__(ref_el, dual, element.get_order(), element.get_formdegree(), element._mapping)
self.element = element
# Construct unflattening map for passing correct values to tabulate()
self.unflattening_map = compute_unflattening_map(self.element.ref_el.get_topology())
def degree(self):
"""Return the degree of the (embedding) polynomial space."""
return self.element.degree()
def tabulate(self, order, points, entity=None):
"""Return tabulated values of derivatives up to given order of
basis functions at given points."""
if entity is None:
entity = (self.get_reference_element().get_spatial_dimension(), 0)
# Entity is provided in flattened form (d, i)
# Appropriate product entity is taken from the unflattening_map dict
entity_dim, entity_id = entity
product_entity = self.unflattening_map[(entity_dim, entity_id)]
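# Illustrative sketch: on a UFC quadrilateral a flat edge entity
# (1, i) unflattens to a product entity of the form ((0, 1), j)
# or ((1, 0), j), which the underlying TensorProductElement's
# tabulate() understands.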
return self.element.tabulate(order, points, product_entity)
def value_shape(self):
"""Return the value shape of the finite element functions."""
return self.element.value_shape()
def get_nodal_basis(self):
"""Return the nodal basis, encoded as a PolynomialSet object,
for the finite element."""
return self.element.get_nodal_basis()
def get_coeffs(self):
"""Return the expansion coefficients for the basis of the
finite element."""
return self.element.get_coeffs()
def dmats(self):
"""Return dmats: expansion coefficients for basis function
derivatives."""
return self.element.dmats()
def get_num_members(self, arg):
"""Return number of members of the expansion set."""
return self.element.get_num_members(arg)
def is_nodal(self):
# This element is nodal iff unflattened element is nodal.
return self.element.is_nodal()
fiat-2019.2.0~git20210419.7d418fa/MANIFEST.in 0000664 0000000 0000000 00000000233 14135323752 0017233 0 ustar 00root root 0000000 0000000 include AUTHORS
include COPYING
include COPYING.LESSER
include ChangeLog
recursive-include doc *
recursive-include test *
global-exclude __pycache__ *.pyc
fiat-2019.2.0~git20210419.7d418fa/README.rst 0000664 0000000 0000000 00000004001 14135323752 0017161 0 ustar 00root root 0000000 0000000 ========================================
FIAT: FInite element Automatic Tabulator
========================================
The FInite element Automatic Tabulator FIAT supports generation of
arbitrary order instances of the Lagrange elements on lines,
triangles, and tetrahedra. It is also capable of generating arbitrary
order instances of Jacobi-type quadrature rules on the same element
shapes. Further, H(div) and H(curl) conforming finite element spaces
such as the families of Raviart-Thomas, Brezzi-Douglas-Marini and
Nedelec are supported on triangles and tetrahedra. Upcoming versions
will also support Hermite and nonconforming elements.
FIAT is part of the FEniCS Project.
For more information, visit http://www.fenicsproject.org
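A minimal usage sketch (it assumes the top-level ``FIAT`` package
exports ``ufc_simplex`` and ``Lagrange``; the element, degree and
points are arbitrary examples)::

    import FIAT

    cell = FIAT.ufc_simplex(2)           # reference triangle
    element = FIAT.Lagrange(cell, 2)     # quadratic Lagrange element
    points = [(0.0, 0.0), (0.5, 0.5)]
    # basis function values and first derivatives at the points
    tab = element.tabulate(1, points)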
.. image:: https://github.com/FEniCS/fiat/workflows/FIAT%20CI/badge.svg
:target: https://github.com/FEniCS/fiat/actions?query=workflow%3A%22FIAT+CI%22
:alt: Build Status
.. image:: https://coveralls.io/repos/github/FEniCS/fiat/badge.svg?branch=master
:target: https://coveralls.io/github/FEniCS/fiat?branch=master
:alt: Coverage Status
.. image:: https://readthedocs.org/projects/fenics-fiat/badge/?version=latest
:target: http://fenics.readthedocs.io/projects/fiat/en/latest/?badge=latest
:alt: Documentation Status
Documentation
=============
Documentation can be viewed at http://fenics-fiat.readthedocs.org/.
License
=======
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
fiat-2019.2.0~git20210419.7d418fa/doc/ 0000775 0000000 0000000 00000000000 14135323752 0016244 5 ustar 00root root 0000000 0000000 fiat-2019.2.0~git20210419.7d418fa/doc/sphinx/ 0000775 0000000 0000000 00000000000 14135323752 0017555 5 ustar 00root root 0000000 0000000 fiat-2019.2.0~git20210419.7d418fa/doc/sphinx/Makefile 0000664 0000000 0000000 00000015347 14135323752 0021227 0 ustar 00root root 0000000 0000000 # Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = build
# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
help:
@echo "Please use \`make ' where is one of"
@echo " html to make standalone HTML files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " singlehtml to make a single large HTML file"
@echo " pickle to make pickle files"
@echo " json to make JSON files"
@echo " htmlhelp to make HTML files and a HTML help project"
@echo " qthelp to make HTML files and a qthelp project"
@echo " devhelp to make HTML files and a Devhelp project"
@echo " epub to make an epub"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " latexpdf to make LaTeX files and run them through pdflatex"
@echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
@echo " text to make text files"
@echo " man to make manual pages"
@echo " texinfo to make Texinfo files"
@echo " info to make Texinfo files and run them through makeinfo"
@echo " gettext to make PO message catalogs"
@echo " changes to make an overview of all changed/added/deprecated items"
@echo " xml to make Docutils-native XML files"
@echo " pseudoxml to make pseudoxml-XML files for display purposes"
@echo " linkcheck to check all external links for integrity"
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
clean:
rm -rf $(BUILDDIR)/*
html:
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
dirhtml:
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
singlehtml:
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
@echo
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
pickle:
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
json:
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
@echo
@echo "Build finished; now you can process the JSON files."
htmlhelp:
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
@echo
@echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp:
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
@echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/FIniteelementAutomaticTabulatorFIAT.qhcp"
@echo "To view the help file:"
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/FIniteelementAutomaticTabulatorFIAT.qhc"
devhelp:
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
@echo
@echo "Build finished."
@echo "To view the help file:"
@echo "# mkdir -p $$HOME/.local/share/devhelp/FIniteelementAutomaticTabulatorFIAT"
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/FIniteelementAutomaticTabulatorFIAT"
@echo "# devhelp"
epub:
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
@echo
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
latex:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@echo "Run \`make' in that directory to run these through (pdf)latex" \
"(use \`make latexpdf' here to do that automatically)."
latexpdf:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through pdflatex..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
latexpdfja:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through platex and dvipdfmx..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
text:
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
@echo
@echo "Build finished. The text files are in $(BUILDDIR)/text."
man:
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
@echo
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
texinfo:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
@echo "Run \`make' in that directory to run these through makeinfo" \
"(use \`make info' here to do that automatically)."
info:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo "Running Texinfo files through makeinfo..."
make -C $(BUILDDIR)/texinfo info
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
gettext:
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
@echo
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
changes:
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
@echo
@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
@echo
@echo "Link check complete; look for any errors in the above output " \
"or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
@echo "Testing of doctests in the sources finished, look at the " \
"results in $(BUILDDIR)/doctest/output.txt."
xml:
$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
@echo
@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
pseudoxml:
$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
@echo
@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
fiat-2019.2.0~git20210419.7d418fa/doc/sphinx/requirements.txt 0000664 0000000 0000000 00000000032 14135323752 0023034 0 ustar 00root root 0000000 0000000 numpy
sympy
sphinx==1.7.0
fiat-2019.2.0~git20210419.7d418fa/doc/sphinx/source/ 0000775 0000000 0000000 00000000000 14135323752 0021055 5 ustar 00root root 0000000 0000000 fiat-2019.2.0~git20210419.7d418fa/doc/sphinx/source/conf.py 0000664 0000000 0000000 00000022125 14135323752 0022356 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
#
# FInite element Automatic Tabulator (FIAT) documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 4 15:38:29 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import pkg_resources
import datetime
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'FInite element Automatic Tabulator (FIAT)'
this_year = datetime.date.today().year
copyright = u'%s, FEniCS Project' % this_year
version = pkg_resources.get_distribution("fenics-fiat").version
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# " v documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'FIniteelementAutomaticTabulatorFIATdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'FIniteelementAutomaticTabulatorFIAT.tex', u'FInite element Automatic Tabulator (FIAT) Documentation',
u'FEniCS Project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'finiteelementautomatictabulatorfiat', u'FInite element Automatic Tabulator (FIAT) Documentation',
[u'FEniCS Project'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'FIniteelementAutomaticTabulatorFIAT', u'FInite element Automatic Tabulator (FIAT) Documentation',
u'FEniCS Project', 'FIniteelementAutomaticTabulatorFIAT', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
def run_apidoc(_):
modules = ['FIAT']
# Get location of Sphinx files
sphinx_source_dir = os.path.abspath(os.path.dirname(__file__))
repo_dir = os.path.abspath(os.path.join(sphinx_source_dir, os.path.pardir,
os.path.pardir, os.path.pardir))
apidoc_dir = os.path.join(sphinx_source_dir, "api-doc")
from sphinx.ext.apidoc import main
for module in modules:
# Generate .rst files ready for autodoc
module_dir = os.path.join(repo_dir, module)
main(["-f", "-d", "1", "-o", apidoc_dir, module_dir])
def setup(app):
app.connect('builder-inited', run_apidoc)
fiat-2019.2.0~git20210419.7d418fa/doc/sphinx/source/index.rst 0000664 0000000 0000000 00000001730 14135323752 0022717 0 ustar 00root root 0000000 0000000 .. title:: FIAT
========================================
FIAT: FInite element Automatic Tabulator
========================================
FIAT is a Python package for automatic generation of finite element
basis functions. It is capable of generating finite element basis
functions for a wide range of finite element families on simplices
(lines, triangles and tetrahedra), including the Lagrange elements,
and the elements of Raviart-Thomas, Brezzi-Douglas-Marini and Nedelec.
It is also capable of generating tensor-product elements and a number
more exotic elements, such as the Argyris, Hermite and Morley
elements.
FIAT is part of the FEniCS Project.
For more information, visit http://www.fenicsproject.org.
Documentation
=============
.. toctree::
:titlesonly:
:maxdepth: 1
installation
manual
API reference <api-doc/FIAT>
releases
[FIXME: These links don't belong here, should go under API reference somehow.]
* :ref:`genindex`
* :ref:`modindex`
fiat-2019.2.0~git20210419.7d418fa/doc/sphinx/source/installation.rst 0000664 0000000 0000000 00000002140 14135323752 0024305 0 ustar 00root root 0000000 0000000 .. title:: Installation
============
Installation
============
FIAT is normally installed as part of an installation of FEniCS.
If you are using FIAT as part of the FEniCS software suite, it
is recommended that you follow the
`installation instructions for FEniCS
`__.
To install FIAT itself, read on below for a list of requirements
and installation instructions.
Requirements and dependencies
=============================
FIAT requires Python version 3 or later and depends on the
following Python packages:
* NumPy
* SymPy
These packages will be automatically installed as part of the
installation of FIAT, if not already present on your system.
Installation instructions
=========================
To install FIAT, download the source code from the
`FIAT Bitbucket repository
`__,
and run the following command:
.. code-block:: console
pip install .
To install to a specific location, add the ``--prefix`` flag
to the installation command:
.. code-block:: console
pip install --prefix=<path> .
fiat-2019.2.0~git20210419.7d418fa/doc/sphinx/source/manual.rst 0000664 0000000 0000000 00000027643 14135323752 0023100 0 ustar 00root root 0000000 0000000 .. title:: User manual
===========
User manual
===========
.. note:: This page is work in progress and needs substantial editing.
FIAT (FInite element Automatic Tabulator) is a Python package for
defining and evaluating a wide range of different finite element basis
functions for numerical partial differential equations. It is
intended to make "difficult" elements such as high-order
Brezzi-Douglas-Marini [BDM]_ elements usable by providing
abstractions so that they may be implemented succinctly and hence
treated as a black box. FIAT is intended for use at two different
levels. For one, it is designed to provide a standard API for finite
element bases so that programmers may use whatever elements they need
in their code. At a lower level, it provides necessary infrastructure to
rapidly deploy new kinds of finite elements without expensive symbolic
computation or tedious algebraic manipulation.
It is my goal that a large number of people use FIAT without ever
knowing it. Thanks to several ongoing projects such as
Sundance [Sundance]_, FFC [FFC]_, and PETSc [PETSc]_, it is becoming
possible to define finite element methods using mathematical
notation in some high-level or domain-specific language. The primary
shortcoming of these projects is their lack of support for general
elements. It is one thing to "provide hooks" for general elements,
but absent a tool such as FIAT, these hooks remain mainly empty. As
these projects mature, I hope to expose users of the finite element
method to the exotic world of potentially high-degree finite elements
on unstructured grids using the best elements in :math:`H^1`,
:math:`H(\mathrm{div})`, and :math:`H(\mathrm{curl})`.
In this brief (and still developing) guide, I will first
present the high-level API for users who wish to instantiate a finite
element on a reference domain and evaluate its basis functions and
derivatives at some quadrature points. Then, I will explain some of
the underlying infrastructure so as to demonstrate how to add new
elements.
Using FIAT: A tutorial with Lagrange elements
=============================================
Importing FIAT
--------------
FIAT is organized as a package in Python, consisting of several
modules. In order to get some of the packages, we use the line ::
from FIAT import Lagrange, quadrature, shapes
This loads several modules for the Lagrange elements, quadrature
rules, and the simplicial element shapes which FIAT implements. The
roles each of these plays will become clear shortly.
Important note
--------------
Throughout, FIAT defines the reference elements based on the interval
:math:`(-1,1)` rather than the more common :math:`(0,1)`. So, the one-dimensional
reference element is :math:`(-1,1)`, the three vertices of the reference
triangle are :math:`(-1,-1),(1,-1),(-1,1)`, and the four vertices of the
reference tetrahedron are :math:`(-1,-1,-1),(1,-1,-1),(-1,1,-1),(-1,-1,1)`.
Instantiating elements
----------------------
FIAT uses a lightweight object-oriented infrastructure to define
finite elements. The ``Lagrange`` module contains a class
``Lagrange`` modeling the Lagrange finite element family. This
class is a subclass of some ``FiniteElement`` class contained in
another module (``polynomial`` to be precise). So, having imported
the ``Lagrange`` module, we can create the Lagrange element of
degree ``2`` on triangles by ::
shape = shapes.TRIANGLE
degree = 2
U = Lagrange.Lagrange( shape , degree )
Here, ``shapes.TRIANGLE`` is an integer code indicating the two
dimensional simplex. ``shapes`` also defines
``LINE`` and ``TETRAHEDRON``. Most of the
upper-level interface to FIAT is dimensionally abstracted over element
shape.
The class ``FiniteElement`` supports three methods, modeled on the
abstract definition of Ciarlet. These methods are
``domain_shape()``, ``function_space()``, and ``dual_basis()``.
The first of these returns the code for the shape, the second returns
the polynomial space spanned by the element, and the third returns the
nodes of the finite element (including information related to
topological association of nodes with mesh entities, needed for
creating degree of freedom orderings).
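For example, continuing with the quadratic Lagrange element ``U``
created above (a sketch against the legacy API this manual describes;
the concrete return values are implementation-defined)::

    shape_code = U.domain_shape()    # the integer shape code
    Ufs = U.function_space()         # the polynomial space
    nodes = U.dual_basis()           # the nodes (degrees of freedom)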
Quadrature rules
================
FIAT implements arbitrary-order collapsed quadrature, as discussed in
Karniadakis and Sherwin~\cite{}, for the simplex of dimension one,
two, or three. The simplest way to get a quadrature rule is through
the function ```make_quadrature(shape,m)```, which takes a shape code
and an integer indicating the number of points per direction. For
building element matrices using quadratics, we will typically need a
second or third order integration rule, so we can get such a rule by ::
>>> Q = quadrature.make_quadrature( shape , 2 )
This uses two points in each direction on the reference square, then
maps them to the reference triangle. We may get a
``Numeric.array`` of the quadrature weights with the method
``Q.get_weights()`` and a list of tuples storing the quadrature
points with the method ``Q.get_points()``.
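The weights and points together suffice to approximate an integral by
a weighted sum. A sketch, using only the methods just described::

    wts = Q.get_weights()
    pts = Q.get_points()
    f = lambda x: x[0] * x[1]        # an example integrand
    approx = sum(w * f(x) for w, x in zip(wts, pts))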
Tabulation
==========
FIAT provides functions for tabulating the element basis functions and
their derivatives. To get the ``FunctionSpace`` object, we do ::
>>> Ufs = U.function_space()
To get the values of each basis function at each of the quadrature
points, we use the ``tabulate()`` method
>>> Ufs.tabulate( Q.get_points() )
array([[ 0.22176167, -0.12319761, -0.11479229, -0.06377178],
[-0.11479229, -0.06377178, 0.22176167, -0.12319761],
[-0.10696938, 0.18696938, -0.10696938, 0.18696938],
[ 0.11074286, 0.19356495, 0.41329796, 0.72239423],
[ 0.41329796, 0.72239423, 0.11074286, 0.19356495],
[ 0.47595918, 0.08404082, 0.47595918, 0.08404082]])
This returns a two-dimensional ``Numeric.array`` with rows for each
basis function and columns for each input point.
Also, finite element codes require tabulation of the basis functions'
derivatives. Each ``FunctionSpace`` object also provides a method
``tabulate_jet(i,xs)`` that returns a list of Python dictionaries.
The ``i``th entry of the list is a dictionary storing the values of
all ``i``th order derivatives. Each dictionary maps a multiindex
(a tuple of length ``i``) to the table of the associated partial
derivatives of the basis functions at those points. For example, ::
>>> Ufs_jet = Ufs.tabulate_jet( 1 , Q.get_points() )
tabulates the zeroth and first partial derivatives of the function
space at the quadrature points. Then, ::
>>> Ufs_jet[0]
{(0, 0): array([[ 0.22176167, -0.12319761, -0.11479229, -0.06377178],
[-0.11479229, -0.06377178, 0.22176167, -0.12319761],
[-0.10696938, 0.18696938, -0.10696938, 0.18696938],
[ 0.11074286, 0.19356495, 0.41329796, 0.72239423],
[ 0.41329796, 0.72239423, 0.11074286, 0.19356495],
[ 0.47595918, 0.08404082, 0.47595918, 0.08404082]])}
gives us a dictionary mapping the only zeroth-order partial derivative
to the values of the basis functions at the quadrature points. More
interestingly, we may get the first derivatives in the x- and y-
directions with ::
>>> Ufs_jet[1][(1,0)]
array([[-0.83278049, -0.06003983, 0.14288254, 0.34993778],
[-0.14288254, -0.34993778, 0.83278049, 0.06003983],
[ 0. , 0. , 0. , 0. ],
[ 0.31010205, 1.28989795, 0.31010205, 1.28989795],
[-0.31010205, -1.28989795, -0.31010205, -1.28989795],
[ 0.97566304, 0.40997761, -0.97566304, -0.40997761]])
>>> Ufs_jet[1][(0,1)]
array([[ -8.32780492e-01, -6.00398310e-02, 1.42882543e-01, 3.49937780e-01],
[ 7.39494156e-17, 4.29608279e-17, 4.38075188e-17, 7.47961065e-17],
[ -1.89897949e-01, 7.89897949e-01, -1.89897949e-01, 7.89897949e-01],
[ 3.57117457e-01, 1.50062220e-01, 1.33278049e+00, 5.60039831e-01],
[ 1.02267844e+00, -7.29858118e-01, 4.70154051e-02, -1.13983573e+00],
[ -3.57117457e-01, -1.50062220e-01, -1.33278049e+00, -5.60039831e-01]])
Lower-level API
===============
Not only does FIAT provide a high-level library interface for users to
evaluate existing finite element bases, but it also provides
lower-level tools. Here, we survey these tools module-by-module.
shapes.py
---------
FIAT currently only supports simplicial reference elements, but does so
in a fairly dimensionally-independent way (up to tetrahedra).
jacobi.py
---------
This is a low-level module that tabulates the Jacobi polynomials and
their derivatives, and also provides Gauss-Jacobi points. This module
will seldom if ever be imported directly by users. For more
information, consult the documentation strings and source code.
expansions.py
-------------
FIAT relies on orthonormal polynomial bases. These are constructed by
mapping appropriate Jacobi polynomials from the reference cube to the
reference simplex, as described in the reference of Karniadakis and
Sherwin~\cite{}. The module ``expansions.py`` implements these
orthonormal expansions. This is also a low-level module that will
infrequently be used directly, but it forms the backbone for the
module ``polynomial.py``.
quadrature.py
-------------
FIAT makes heavy use of numerical quadrature, both internally and in
the user interface. Internally, many function spaces or degrees of
freedom are defined in terms of integral quantities having certain
behavior. Keeping with the theme of arbitrary order approximations,
FIAT provides arbitrary order quadrature rules on the reference
simplices. These are constructed by mapping Gauss-Jacobi rules from
the reference cube. While these rules are suboptimal in terms of
order of accuracy achieved for a given number of points, they may be
generated mechanically in a simpler way than symmetric quadrature
rules. In the future, we hope to have the best existing symmetric
rules integrated into FIAT.
Unless one is modifying the quadrature rules available, all of the
functionality of ``quadrature.py`` may be accessed through the
single function ``make_quadrature``.
This function takes the code for a shape and the number of points in
each coordinate direction and returns a quadrature rule. Internally,
there is a lightweight class hierarchy rooted at an abstract
``QuadratureRule`` class, where the quadrature rules for
different shapes are actually different classes. However, the dynamic
typing of Python relieves the user from these considerations. The
interface to an instance consists in the following methods.
- ``get_points()``, which returns a list of the quadrature
points, each stored as a tuple. For dimensional uniformity,
one-dimensional quadrature rules are stored as lists of 1-tuples
rather than as lists of numbers.
- ``get_weights()``, which returns a ``Numeric.array``
of quadrature weights.
- ``integrate(f)``, which takes a callable object ``f``
and returns the (approximate) integral over the domain.
- Also, the ``__call__`` method is overloaded so that a
quadrature rule may be applied to a callable object. This is
syntactic sugar on top of the ``integrate`` method, as sketched below.
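A sketch of the two equivalent spellings, assuming the
``make_quadrature`` interface described above::

    Q = quadrature.make_quadrature( shapes.TRIANGLE , 2 )
    f = lambda x: x[0] + x[1]
    v1 = Q.integrate( f )            # explicit method call
    v2 = Q( f )                      # the same, via __call__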
polynomial.py
-------------
The ``polynomial`` module provides the bulk of the classes
needed to represent polynomial bases and finite element spaces.
The class ``PolynomialBase`` provides a high-level access to
the orthonormal expansion bases; it is typically not instantiated
directly in an application, but all other kinds of polynomial bases
are constructed as linear combinations of the members of a
``PolynomialBase`` instance. The module provides classes for
scalar and vector-valued polynomial sets, as well as an interface to individual
polynomials and finite element spaces.
PolynomialBase
^^^^^^^^^^^^^^
PolynomialSet
^^^^^^^^^^^^^
The ``PolynomialSet`` function is a factory function serving as the
interface into the hierarchy of polynomial set classes.
.. [BDM] Brezzi, Franco; Douglas, Jim, Jr.; Marini, L. D. "Two families of mixed finite elements for second order elliptic problems". Numerische Mathematik. vol 47. no. 2. June 1985. 217-235. doi:10.1007/BF01389710
.. [Sundance] http://www.math.ttu.edu/~klong/Sundance/html/index.html
.. [FFC] https://bitbucket.org/fenics-project/ffc/src/master/
.. [PETSc] https://www.mcs.anl.gov/petsc/ fiat-2019.2.0~git20210419.7d418fa/doc/sphinx/source/releases.rst 0000664 0000000 0000000 00000000450 14135323752 0023411 0 ustar 00root root 0000000 0000000 .. title:: Release notes
=============
Release notes
=============
.. toctree::
:maxdepth: 2
releases/next
releases/v2019.1.0
releases/v2018.1.0
releases/v2017.2.0
releases/v2017.1.0.post1
releases/v2017.1.0
releases/v2016.2.0
releases/v2016.1.0
releases/v1.6.0
fiat-2019.2.0~git20210419.7d418fa/doc/sphinx/source/releases/ 0000775 0000000 0000000 00000000000 14135323752 0022660 5 ustar 00root root 0000000 0000000 fiat-2019.2.0~git20210419.7d418fa/doc/sphinx/source/releases/next.rst 0000664 0000000 0000000 00000001024 14135323752 0024365 0 ustar 00root root 0000000 0000000 ===========================
Changes in the next release
===========================
Summary of changes
==================
- No changes yet.
.. note:: Developers should use this page to track and list changes
during development. At the time of release, this page should
be published (and renamed) to list the most important
changes in the new release.
Detailed changes
================
.. note:: At the time of release, make a verbatim copy of the
ChangeLog here (and remove this note).
fiat-2019.2.0~git20210419.7d418fa/doc/sphinx/source/releases/v1.6.0.rst 0000664 0000000 0000000 00000000302 14135323752 0024235 0 ustar 00root root 0000000 0000000 ========================
Changes in version 1.6.0
========================
FIAT 1.6.0 was released on 2015-07-28.
- Support DG on facets through the element ``Discontinuous Lagrange
Trace``
fiat-2019.2.0~git20210419.7d418fa/doc/sphinx/source/releases/v2016.1.0.rst 0000664 0000000 0000000 00000000216 14135323752 0024464 0 ustar 00root root 0000000 0000000 ===========================
Changes in version 2016.1.0
===========================
FIAT 2016.1.0 was released on 2016-06-23.
- Minor fixes
fiat-2019.2.0~git20210419.7d418fa/doc/sphinx/source/releases/v2016.2.0.rst 0000664 0000000 0000000 00000005760 14135323752 0024476 0 ustar 00root root 0000000 0000000 ===========================
Changes in version 2016.2.0
===========================
FIAT 2016.2.0 was released on 2016-11-30.
Summary of changes
==================
- More elegant edge-based degrees of freedom are used for generalized Regge
finite elements. This is an internal change and is not visible to other parts
of FEniCS.
- The name of the mapping for generalized Regge finite element is changed to
"double covariant piola" from "pullback as metric". Geometrically, this
mapping is just the pullback of covariant 2-tensor fields in terms of proxy
matrix-fields. Because the mapping for 1-forms in FEniCS is currently named
"covariant piola", this mapping for symmetric tensor product of 1-forms is
thus called "double covariant piola". This change causes multiple internal
changes downstream in UFL and FFC. But this change should not be visible to
the end-user.
- Added support for the Hellan-Herrmann-Johnson element (symmetric matrix
fields with normal-normal continuity in 2D).
- Add method ``FiniteElement.is_nodal()`` for checking element nodality
- Add ``NodalEnrichedElement`` which merges dual bases (nodes) of given
elements and orthogonalizes basis for nodality
- Restructuring ``finite_element.py`` with the addition of a non-nodal class
``FiniteElement`` and a nodal class ``CiarletElement``. ``FiniteElement`` is
designed to be used to create elements where, in general, a nodal basis isn't
well-defined. ``CiarletElement`` implements the usual nodal abstraction of
a finite element.
- Removing ``trace.py`` and ``trace_hdiv.py`` with a new implementation of the
trace element of an HDiv-conforming element: ``HDivTrace``. It is also
mathematically equivalent to the former ``DiscontinuousLagrangeTrace``, that
is, the DG field defined only on co-dimension 1 entities.
- All nodal finite elements inherit from ``CiarletElement``, and the non-nodal
``TensorProductElement``, ``EnrichedElement`` and ``HDivTrace`` inherit from
``FiniteElement``.
Detailed changes
================
- Enable Travis CI on GitHub
- Add Firedrake quadrilateral cell
- Add tensor product cell
- Add facet -> cell coordinate transformation
- Add Bubble element
- Add discontinuous Taylor element
- Add broken element and H(div) trace element
- Add element restrictions onto mesh entities
- Add tensor product elements (for tensor product cells)
- Add H(div) and H(curl) element-modifiers for TPEs
- Add enriched element, i.e. sum of elements (e.g. for building Mini)
- Add multidimensional taylor elements
- Add Gauss Lobatto Legendre elements
- Find non-vanishing DoFs on facets
- Add tensor product quadrature rule
- Make regression tests working again after few years
- Prune modules having only __main__ code including transform_morley,
transform_hermite (ff86250820e2b18f7a0df471c97afa87207e9a7d)
- Remove newdubiner module (b3b120d40748961fdd0727a4e6c62450198d9647,
reference removed by cb65a84ac639977b7be04962cc1351481ca66124)
- Switch from homebrew factorial/gamma to math module (wraps C std lib)
fiat-2019.2.0~git20210419.7d418fa/doc/sphinx/source/releases/v2017.1.0.post1.rst 0000664 0000000 0000000 00000000352 14135323752 0025533 0 ustar 00root root 0000000 0000000 =================================
Changes in version 2017.1.0.post1
=================================
FIAT 2017.1.0.post1 was released on 2017-09-12.
Summary of changes
==================
- Change PyPI package name to fenics-fiat.
fiat-2019.2.0~git20210419.7d418fa/doc/sphinx/source/releases/v2017.1.0.rst 0000664 0000000 0000000 00000001161 14135323752 0024465 0 ustar 00root root 0000000 0000000 ===========================
Changes in version 2017.1.0
===========================
FIAT 2017.1.0 was released on 2017-05-09.
Summary of changes
==================
- Extended the discontinuous trace element ``HDivTrace`` to support tensor
product reference cells. Tabulating the trace defined on a tensor product
cell relies on the argument ``entity`` to specify a facet of the cell. The
backwards compatibility case ``entity=None`` does not support tensor product
tabulation as a result. Tabulating the trace of triangles or tetrahedron
remains unaffected and works as usual with or without an entity argument.
fiat-2019.2.0~git20210419.7d418fa/doc/sphinx/source/releases/v2017.2.0.rst 0000664 0000000 0000000 00000000467 14135323752 0024476 0 ustar 00root root 0000000 0000000 ===========================
Changes in version 2017.2.0
===========================
FIAT 2017.2.0 was released on 2017-12-05.
Summary of changes
==================
- Add quadrilateral and hexahedron reference cells
- Add quadrilateral and hexahedron elements (with a wrapping class for TensorProductElement)
fiat-2019.2.0~git20210419.7d418fa/doc/sphinx/source/releases/v2018.1.0.rst 0000664 0000000 0000000 00000000547 14135323752 0024475 0 ustar 00root root 0000000 0000000 ===========================
Changes in version 2018.1.0
===========================
FIAT 2018.1.0 was released on 2018-06-14.
Summary of changes
==================
- Remove Python 2 support
- Generalize ``Bubble`` element to ``CodimBubble`` to create bubbles on entity
of arbitrary codimension; add ``FacetBubble``, keep ``Bubble`` (as bubble on
cell)
fiat-2019.2.0~git20210419.7d418fa/doc/sphinx/source/releases/v2019.1.0.rst 0000664 0000000 0000000 00000000553 14135323752 0024473 0 ustar 00root root 0000000 0000000 ===========================
Changes in version 2019.1.0
===========================
FIAT 2019.1.0 was released on 2019-04-17.
Summary of changes
==================
- Added an implementation of the Bell finite element (K. Bell 1969
doi:10.1002/nme.1620010180), with extra basis functions for
transformation theory from Kirby (2018) doi:10.5802/smai-jcm.33.
fiat-2019.2.0~git20210419.7d418fa/setup.cfg 0000664 0000000 0000000 00000000677 14135323752 0017332 0 ustar 00root root 0000000 0000000 [flake8]
ignore = E501,E226,E731,W504,
E741 # ambiguous variable name
exclude = .git,__pycache__,doc/sphinx/source/conf.py,build,dist
min-version = 3.0
[pydocstyle]
# Work on removing these ignores
ignore = D100,D101,D102,D103,D104,D105,D107,
D200,D202,
D203, # this error should be disabled
D204,D205,D208,D209,D212,D213,
D300,
D400,D401,D404,D412,D415,D416
# convention = numpy
fiat-2019.2.0~git20210419.7d418fa/setup.py 0000775 0000000 0000000 00000001370 14135323752 0017215 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.version_info < (3, 0):
print("Python 3.0 or higher required, please upgrade.")
sys.exit(1)
version = "2019.2.0.dev0"
url = "https://bitbucket.org/fenics-project/fiat/"
tarball = None
if 'dev' not in version:
tarball = url + "downloads/fenics-fiat-%s.tar.gz" % version
setup(name="fenics-fiat",
description="FInite element Automatic Tabulator",
version=version,
author="Robert C. Kirby et al.",
author_email="fenics-dev@googlegroups.com",
url=url,
download_url=tarball,
license="LGPL v3 or later",
packages=["FIAT"],
install_requires=["numpy", "sympy"])
fiat-2019.2.0~git20210419.7d418fa/shippable.yml 0000664 0000000 0000000 00000000327 14135323752 0020173 0 ustar 00root root 0000000 0000000 language: python
python:
- 3.6
env:
- DATA_REPO_GIT=""
build:
ci:
- pip install --upgrade pip
- pip install flake8 pytest
- pip install .
- python -m flake8 .
- python -m pytest -v test/
fiat-2019.2.0~git20210419.7d418fa/test/ 0000775 0000000 0000000 00000000000 14135323752 0016456 5 ustar 00root root 0000000 0000000 fiat-2019.2.0~git20210419.7d418fa/test/README 0000664 0000000 0000000 00000000202 14135323752 0017330 0 ustar 00root root 0000000 0000000 Run tests by::
py.test [--skip-download]
py.test [--skip-download] regression/
py.test unit/
py.test unit/foo.py
fiat-2019.2.0~git20210419.7d418fa/test/conftest.py 0000664 0000000 0000000 00000001547 14135323752 0020664 0 ustar 00root root 0000000 0000000 # Copyright (C) 2016 Jan Blechta
#
# This file is part of FIAT.
#
# FIAT is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FIAT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FIAT. If not, see .
def pytest_addoption(parser):
parser.addoption("--skip-download", dest='download', action='store_false',
help="do not download FIAT reference data")
fiat-2019.2.0~git20210419.7d418fa/test/regression/ 0000775 0000000 0000000 00000000000 14135323752 0020636 5 ustar 00root root 0000000 0000000 fiat-2019.2.0~git20210419.7d418fa/test/regression/README.rst 0000664 0000000 0000000 00000003741 14135323752 0022332 0 ustar 00root root 0000000 0000000 How to run regression tests
===========================
To run regression tests with default parameters, simply run::
cd /test/regression/
py.test
Look at test.py for more options.
How to update references
========================
To update the references for the FIAT regression tests, first commit
your changes, then run the regression test (to generate the new
references) and finally run the script upload::
cd /test/regression/
py.test
./scripts/upload
Note: You may be asked for your *Bitbucket* username and password when
uploading the reference data, if use of ssh keys fails.
Note: The upload script will push the new references to the
fiat-reference-data repository. This is harmless even if these
references are not needed later.
Note: The upload script will update the file fiat-regression-data-id
and commit this change to the currently active branch, remember to
include this commit when merging or pushing your changes elsewhere.
Note: You can cherry-pick the commit that updated
fiat-regression-data-id into another branch to use the same set of
references there.
Note: If you ever get merge conflicts in the fiat-regression-data-id,
always pick one version of the file. Most likely you'll need to update
the references again.
How to run regression tests against a different set of regression data
======================================================================
To run regression tests and compare to a different set of regression
data, perhaps to see what has changed in generated code since a
certain version, check out the fiat-regression-data-id file you want
and run tests as usual::
cd /test/regression/
git checkout fiat-regression-data-id
py.test
The test.py script will run scripts/download which will check out the
regression data with the commit id from fiat-regression-data-id in
fiat-regression-data/. Run::
DATA_REPO_GIT="" ./scripts/download/
to use https instead of ssh.
fiat-2019.2.0~git20210419.7d418fa/test/regression/conftest.py 0000664 0000000 0000000 00000002624 14135323752 0023041 0 ustar 00root root 0000000 0000000 # Copyright (C) 2016 Jan Blechta
#
# This file is part of FIAT.
#
# FIAT is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FIAT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FIAT. If not, see .
import os
# Directories
path = os.path.dirname(os.path.abspath(__file__))
ref_path = os.path.join(path, 'fiat-reference-data')
download_script = os.path.join(path, 'scripts', 'download')
def pytest_configure(config):
# Download reference data
if config.getoption("download"):
failure = download_reference()
if failure:
raise RuntimeError("Download reference data failed")
print("Download reference data ok")
else:
print("Skipping reference data download")
if not os.path.exists(ref_path):
os.makedirs(ref_path)
def download_reference():
_path = os.getcwd()
os.chdir(path)
rc = os.system(download_script)
os.chdir(_path)
return rc
fiat-2019.2.0~git20210419.7d418fa/test/regression/fiat-reference-data-id 0000664 0000000 0000000 00000000051 14135323752 0024735 0 ustar 00root root 0000000 0000000 83d6c1d8f30d2c116398f496a4592ef541ea2843
fiat-2019.2.0~git20210419.7d418fa/test/regression/scripts/ 0000775 0000000 0000000 00000000000 14135323752 0022325 5 ustar 00root root 0000000 0000000 fiat-2019.2.0~git20210419.7d418fa/test/regression/scripts/download 0000775 0000000 0000000 00000002300 14135323752 0024055 0 ustar 00root root 0000000 0000000 #!/bin/bash
#
# Copyright (C) 2013 Anders Logg and Martin Sandve Alnaes
#
# This file is part of FIAT.
#
# FIAT is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FIAT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FIAT. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Johannes Ring, 2013-04-23
#
# First added: 2013-04-22
# Last changed: 2013-08-20
#
# This script downloads the reference data for the FIAT regression tests
# and updates to the reference data version specified by the data id file.
# Parameters
source ./scripts/parameters
# Get updated reference repository
./scripts/getreferencerepo
if [ $? -ne 0 ]; then
exit 1
fi
# Checkout data referenced by id file
./scripts/getdata
if [ $? -ne 0 ]; then
exit 1
fi
fiat-2019.2.0~git20210419.7d418fa/test/regression/scripts/getdata 0000775 0000000 0000000 00000002215 14135323752 0023664 0 ustar 00root root 0000000 0000000 #!/bin/bash
#
# Copyright (C) 2013 Anders Logg and Martin Sandve Alnaes
#
# This file is part of FIAT.
#
# FIAT is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FIAT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FIAT. If not, see <http://www.gnu.org/licenses/>.
#
# First added: 2013-04-22
# Last changed: 2013-08-21
#
# This script checks out the reference data at the given commit id,
# or, if none is given, at the commit id found in the data id file.
# Parameters
source scripts/parameters
# Take data id as optional argument or get from file
DATA_ID=$1 && [ -z "$DATA_ID" ] && DATA_ID=`cat $DATA_ID_FILE`
# Checkout data referenced by id
(cd $DATA_DIR && git checkout -B auto $DATA_ID)
exit $?
fiat-2019.2.0~git20210419.7d418fa/test/regression/scripts/getreferencerepo 0000775 0000000 0000000 00000003677 14135323752 0025614 0 ustar 00root root 0000000 0000000 #!/bin/bash
#
# Copyright (C) 2013 Anders Logg and Martin Sandve Alnaes
#
# This file is part of FIAT.
#
# FIAT is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FIAT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FIAT. If not, see <http://www.gnu.org/licenses/>.
#
# First added: 2013-04-22
# Last changed: 2013-08-21
#
# This script clones the FIAT reference data repository, or updates an
# existing clone to the latest upstream state.
# Parameters
source ./scripts/parameters
# Get reference repository
if [ ! -d "$DATA_DIR" ]; then
echo "Cloning reference data repository"
if [ -n "$DATA_REPO_GIT" ]; then
git clone $DATA_REPO_GIT
fi
if [ ! -d "$DATA_DIR" ]; then
git clone $DATA_REPO_HTTPS
fi
else
pushd $DATA_DIR
echo "Found existing reference data repository, pulling new data"
git checkout master
if [ $? -ne 0 ]; then
echo "Failed to checkout master, check state of reference data directory."
exit 1
fi
git fetch
if [ $? -ne 0 ]; then
echo "WARNING: Failed to fetch latest reference data from server."
else
git pull
if [ $? -ne 0 ]; then
echo "Failed to pull latest reference data from server, possibly a merge situation."
exit 1
fi
fi
popd
fi
# Check that we had success with getting reference repository
if [ ! -d "$DATA_DIR" ]; then
echo "Failed to update reference data directory '$DATA_DIR'."
exit 1
fi
fiat-2019.2.0~git20210419.7d418fa/test/regression/scripts/parameters 0000775 0000000 0000000 00000000450 14135323752 0024415 0 ustar 00root root 0000000 0000000 #OUTPUT_DIR="output"
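# ${DATA_REPO_GIT+x} expands to "x" only when DATA_REPO_GIT is set (even if
# set to the empty string), so the ssh default below applies only when the
# variable is entirely unset; DATA_REPO_GIT="" therefore forces the https
# fallback in getreferencerepo.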
[ ! -z ${DATA_REPO_GIT+x} ] || DATA_REPO_GIT="git@bitbucket.org:fenics-project/fiat-reference-data.git"
DATA_REPO_HTTPS="https://bitbucket.org/fenics-project/fiat-reference-data.git"
DATA_DIR="fiat-reference-data"
DATA_ID_FILE="fiat-reference-data-id"
OUTPUT_DIR="$DATA_DIR"
fiat-2019.2.0~git20210419.7d418fa/test/regression/scripts/upload 0000775 0000000 0000000 00000004227 14135323752 0023544 0 ustar 00root root 0000000 0000000 #!/bin/bash
#
# Copyright (C) 2013 Anders Logg and Martin Sandve Alnaes
#
# This file is part of FIAT.
#
# FIAT is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FIAT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FIAT. If not, see <http://www.gnu.org/licenses/>.
#
# First added: 2013-04-22
# Last changed: 2013-08-21
#
# This script overwrites the reference data with the current output
# and stores the new reference data as part of the FIAT reference data
# repository. The commit id of the stored reference data is commited
# to a file in the main repo.
# Parameters
source ./scripts/parameters
# Get updated reference repository
./scripts/getreferencerepo
if [ $? -ne 0 ]; then
exit 1
fi
# Check that we have any data
if [ ! -d "$OUTPUT_DIR" ]; then
echo "Missing data directory '$OUTPUT_DIR'."
exit 1
fi
# Copy references
echo "Copying new reference data to $DATA_DIR"
rsync -r --exclude='README.rst' --exclude='*.bin' --exclude='*.cpp' $OUTPUT_DIR/ $DATA_DIR
echo ""
# Get current id for main repo (does not include dirty files, so not quite trustworthy!)
REPO_ID=`git rev-list --max-count=1 HEAD`
# Commit new data to reference repository
pushd $DATA_DIR
git add *
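# Filter the noisy "create mode" lines from the commit output; PIPESTATUS
# below checks git's exit status rather than grep's.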
git commit -m "Update reference data, current project head is ${REPO_ID}." | grep -v "create mode"
if [ $? -ne 0 ]; then
echo "Failed to commit reference data."
exit 1
fi
DATA_ID=`git rev-list --max-count=1 HEAD`
popd
# Commit reference data commit id to file in main repo
echo $DATA_ID > $DATA_ID_FILE
git commit $DATA_ID_FILE -m"Update reference data pointer to ${DATA_ID}."
# Push references to server
pushd $DATA_DIR
git push
if [ $? -ne 0 ]; then
echo "WARNING: Failed to push new reference data to server."
fi
popd
fiat-2019.2.0~git20210419.7d418fa/test/regression/test_regression.py 0000664 0000000 0000000 00000030246 14135323752 0024434 0 ustar 00root root 0000000 0000000 # Copyright (C) 2010 Anders Logg, 2015 Jan Blechta
#
# This file is part of FIAT.
#
# FIAT is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FIAT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FIAT. If not, see <http://www.gnu.org/licenses/>.
#
# First added: 2010-01-31
# Last changed: 2014-06-30
import pytest
import json
import numpy
import warnings
import os
from FIAT import supported_elements, make_quadrature, ufc_simplex, \
expansions, reference_element, polynomial_set
# Parameters
tolerance = 1e-8
# Directories
path = os.path.dirname(os.path.abspath(__file__))
ref_path = os.path.join(path, 'fiat-reference-data')
download_script = os.path.join(path, 'scripts', 'download')
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
# If numpy array, convert it to a list and store it in a dict.
if isinstance(obj, numpy.ndarray):
data = obj.tolist()
return dict(__ndarray__=data,
dtype=str(obj.dtype),
shape=obj.shape)
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, obj)
def json_numpy_obj_hook(dct):
# If dict and have '__ndarray__' as a key, convert it back to ndarray.
if isinstance(dct, dict) and '__ndarray__' in dct:
return numpy.asarray(dct['__ndarray__']).reshape(dct['shape'])
return dct
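# A minimal round-trip sketch (illustrative only, not part of the test
# suite): arrays survive dumps/loads via the encoder/hook pair above.
#
#     blob = json.dumps({"a": numpy.eye(2)}, cls=NumpyEncoder)
#     restored = json.loads(blob, object_hook=json_numpy_obj_hook)
#     assert numpy.allclose(restored["a"], numpy.eye(2))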
def load_reference(filename, create_data):
"""Load reference from file. On failure create new file using supplied
function.
"""
try:
# Try loading the reference
reference = json.load(open(filename, "r"), object_hook=json_numpy_obj_hook)
except IOError:
warnings.warn('Reference file "%s" could not be loaded! '
'Creating a new reference file!' % filename,
RuntimeWarning)
# Generate data and store for the future
reference = create_data()
json.dump(reference, open(filename, "w"), cls=NumpyEncoder)
# Report failure
pytest.fail('Comparison to "%s" failed!' % filename)
return reference
def test_polynomials():
def create_data():
ps = polynomial_set.ONPolynomialSet(
ref_el=reference_element.DefaultTetrahedron(),
degree=3
)
return ps.dmats
# Try reading reference values
filename = os.path.join(ref_path, "reference-polynomials.json")
reference = load_reference(filename, create_data)
dmats = create_data()
for dmat, reference_dmat in zip(dmats, reference):
assert (abs(dmat - reference_dmat) < tolerance).all()
def test_polynomials_1D():
def create_data():
ps = polynomial_set.ONPolynomialSet(
ref_el=reference_element.DefaultLine(),
degree=3
)
return ps.dmats
# Try reading reference values
filename = os.path.join(ref_path, "reference-polynomials_1D.json")
reference = load_reference(filename, create_data)
dmats = create_data()
for dmat, reference_dmat in zip(dmats, reference):
assert (abs(dmat - reference_dmat) < tolerance).all()
def test_expansions():
def create_data():
E = reference_element.DefaultTriangle()
k = 3
pts = reference_element.make_lattice(E.get_vertices(), k)
Phis = expansions.get_expansion_set(E)
phis = Phis.tabulate(k, pts)
dphis = Phis.tabulate_derivatives(k, pts)
return phis, dphis
# Try reading reference values
filename = os.path.join(ref_path, "reference-expansions.json")
reference = load_reference(filename, create_data)
table_phi, table_dphi = create_data()
reference_table_phi, reference_table_dphi = reference
# Test raw point data
diff = numpy.array(table_phi) - numpy.array(reference_table_phi)
assert (abs(diff) < tolerance).all()
# Test derivative values
for entry, reference_entry in zip(table_dphi, reference_table_dphi):
for point, reference_point in zip(entry, reference_entry):
value, gradient = point[0], point[1]
reference_value, reference_gradient = \
reference_point[0], reference_point[1]
assert abs(value - reference_value) < tolerance
diff = numpy.array(gradient) - numpy.array(reference_gradient)
assert (abs(diff) < tolerance).all()
def test_expansions_jet():
def create_data():
latticeK = 2
n = 1
order = 2
E = reference_element.DefaultTetrahedron()
pts = reference_element.make_lattice(E.get_vertices(), latticeK)
F = expansions.TetrahedronExpansionSet(E)
return F.tabulate_jet(n, pts, order)
filename = os.path.join(ref_path, "reference-expansions-jet.json")
reference = load_reference(filename, create_data)
# Test jet data
data = create_data()
for datum, reference_datum in zip(data, reference):
diff = numpy.array(datum) - numpy.array(reference_datum)
assert (abs(diff) < tolerance).all()
@pytest.fixture(scope="module")
def quadrature_reference_data():
filename = os.path.join(ref_path, "reference.json")
try:
reference = json.load(open(filename, "r"), object_hook=json_numpy_obj_hook)
except IOError:
warnings.warn('Reference file "%s" could not be loaded! '
'Creating a new reference file!' % filename,
RuntimeWarning)
# No reference data
reference = {}
original = reference.copy()
# Kind of ugly, we rely on destructively modifying the test data
# we produce here to check if we're missing something (and
# therefore have to regenerate the reference data).
yield reference
if original != reference:
# Tests introduced some new data, so dump to disk.
with open(filename, "w") as f:
json.dump(reference, f, cls=NumpyEncoder)
def quadrature_test_case_name(test_case):
family, dim, degree = test_case
return "{}({}, {})".format(family,
{1: "interval",
2: "triangle",
3: "tetrahedron"}[dim],
degree)
@pytest.fixture(params=[("Lagrange", 1, 1),
("Lagrange", 1, 2),
("Lagrange", 1, 3),
("Lagrange", 2, 1),
("Lagrange", 2, 2),
("Lagrange", 2, 3),
("Lagrange", 3, 1),
("Lagrange", 3, 2),
("Lagrange", 3, 3),
("Discontinuous Lagrange", 1, 0),
("Discontinuous Lagrange", 1, 1),
("Discontinuous Lagrange", 1, 2),
("Discontinuous Lagrange", 2, 0),
("Discontinuous Lagrange", 2, 1),
("Discontinuous Lagrange", 2, 2),
("Discontinuous Lagrange", 3, 0),
("Discontinuous Lagrange", 3, 1),
("Discontinuous Lagrange", 3, 2),
("Discontinuous Taylor", 1, 0),
("Discontinuous Taylor", 1, 1),
("Discontinuous Taylor", 1, 2),
("Brezzi-Douglas-Marini", 2, 1),
("Brezzi-Douglas-Marini", 2, 2),
("Brezzi-Douglas-Marini", 2, 3),
("Brezzi-Douglas-Marini", 3, 1),
("Brezzi-Douglas-Marini", 3, 2),
("Brezzi-Douglas-Marini", 3, 3),
("Brezzi-Douglas-Fortin-Marini", 2, 2),
("Raviart-Thomas", 2, 1),
("Raviart-Thomas", 2, 2),
("Raviart-Thomas", 2, 3),
("Raviart-Thomas", 3, 1),
("Raviart-Thomas", 3, 2),
("Raviart-Thomas", 3, 3),
("Discontinuous Raviart-Thomas", 2, 1),
("Discontinuous Raviart-Thomas", 2, 2),
("Discontinuous Raviart-Thomas", 2, 3),
("Discontinuous Raviart-Thomas", 3, 1),
("Discontinuous Raviart-Thomas", 3, 2),
("Discontinuous Raviart-Thomas", 3, 3),
("Nedelec 1st kind H(curl)", 2, 1),
("Nedelec 1st kind H(curl)", 2, 2),
("Nedelec 1st kind H(curl)", 2, 3),
("Nedelec 1st kind H(curl)", 3, 1),
("Nedelec 1st kind H(curl)", 3, 2),
("Nedelec 1st kind H(curl)", 3, 3),
("Nedelec 2nd kind H(curl)", 2, 1),
("Nedelec 2nd kind H(curl)", 2, 2),
("Nedelec 2nd kind H(curl)", 2, 3),
("Nedelec 2nd kind H(curl)", 3, 1),
("Nedelec 2nd kind H(curl)", 3, 2),
("Nedelec 2nd kind H(curl)", 3, 3),
("Crouzeix-Raviart", 1, 1),
("Crouzeix-Raviart", 2, 1),
("Crouzeix-Raviart", 3, 1),
("Regge", 2, 0),
("Regge", 2, 1),
("Regge", 2, 2),
("Regge", 3, 0),
("Regge", 3, 1),
("Regge", 3, 2),
("Bubble", 2, 3),
("Bubble", 2, 4),
("Bubble", 2, 5),
("Bubble", 3, 4),
("Bubble", 3, 5),
("Bubble", 3, 6),
("Hellan-Herrmann-Johnson", 2, 0),
("Hellan-Herrmann-Johnson", 2, 1),
("Hellan-Herrmann-Johnson", 2, 2)],
ids=quadrature_test_case_name)
def quadrature_test_case(request):
return request.param
def test_quadrature(quadrature_reference_data, quadrature_test_case):
num_points = 3
max_derivative = 3
# The (family, dim, degree) combinations to test are supplied by the
# quadrature_test_case fixture above.
def create_data(family, dim, degree):
'''Create the reference data.
'''
# Get domain and element class
domain = ufc_simplex(dim)
ElementClass = supported_elements[family]
# Create element
element = ElementClass(domain, degree)
# Create quadrature points
quad_rule = make_quadrature(domain, num_points)
points = quad_rule.get_points()
# Tabulate at quadrature points
table = element.tabulate(max_derivative, points)
return table
def _perform_test(family, dim, degree, *, reference_table=None):
'''Test against reference data.
'''
table = create_data(family, dim, degree)
# Check against reference
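# Keys in the reference table are stringified derivative tuples such as
# '(0, 0)' (see the regeneration path below); eval() recovers the tuple
# keys produced by element.tabulate().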
for dtuple in reference_table:
assert eval(dtuple) in table
assert table[eval(dtuple)].shape == reference_table[dtuple].shape
diff = table[eval(dtuple)] - reference_table[dtuple]
assert (abs(diff) < tolerance).all(), \
"quadrature case %s %s %s failed!" % (family, dim, degree)
test_case = quadrature_test_case
try:
reference = quadrature_reference_data[str(test_case)]
_perform_test(*test_case, reference_table=reference)
except KeyError:
warnings.warn('Reference file does not contain reference "%s"! '
'Creating a new reference file!'
% (str(test_case)), RuntimeWarning)
ref = dict([(str(k), v) for k, v in create_data(*test_case).items()])
quadrature_reference_data[str(test_case)] = ref
pytest.fail('No reference data for "%s" available' % str(test_case))
if __name__ == '__main__':
pytest.main(os.path.abspath(__file__))
fiat-2019.2.0~git20210419.7d418fa/test/unit/ 0000775 0000000 0000000 00000000000 14135323752 0017435 5 ustar 00root root 0000000 0000000 fiat-2019.2.0~git20210419.7d418fa/test/unit/test_awc.py 0000664 0000000 0000000 00000011241 14135323752 0021617 0 ustar 00root root 0000000 0000000 import numpy as np
from FIAT import ufc_simplex, ArnoldWinther, make_quadrature, expansions
def test_dofs():
line = ufc_simplex(1)
T = ufc_simplex(2)
T.vertices = np.random.rand(3, 2)
AW = ArnoldWinther(T, 3)
# check Kronecker property at vertices
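# bases holds the canonical basis of 2x2 symmetric matrices; basis function
# 3*i+j should reproduce bases[j] at vertex i and vanish at the other vertices.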
bases = [[[1, 0], [0, 0]], [[0, 1], [1, 0]], [[0, 0], [0, 1]]]
vert_vals = AW.tabulate(0, T.vertices)[(0, 0)]
for i in range(3):
for j in range(3):
assert np.allclose(vert_vals[3*i+j, :, :, i], bases[j])
for k in (1, 2):
assert np.allclose(vert_vals[3*i+j, :, :, (i+k) % 3], np.zeros((2, 2)))
# check edge moments
Qline = make_quadrature(line, 6)
linebfs = expansions.LineExpansionSet(line)
linevals = linebfs.tabulate(1, Qline.pts)
# n, n moments
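# Only the two normal-normal moment dofs on each edge (entity_ids[1][ed][0]
# and [2]) may produce nonzero moments against the linear Legendre basis;
# every other basis function must integrate to zero.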
for ed in range(3):
n = T.compute_scaled_normal(ed)
wts = np.asarray(Qline.wts)
nqpline = len(wts)
vals = AW.tabulate(0, Qline.pts, (1, ed))[(0, 0)]
nnvals = np.zeros((30, nqpline))
for i in range(30):
for j in range(len(wts)):
nnvals[i, j] = n @ vals[i, :, :, j] @ n
nnmoments = np.zeros((30, 2))
for bf in range(30):
for k in range(nqpline):
for m in (0, 1):
nnmoments[bf, m] += wts[k] * nnvals[bf, k] * linevals[m, k]
for bf in range(30):
if bf != AW.dual.entity_ids[1][ed][0] and bf != AW.dual.entity_ids[1][ed][2]:
assert np.allclose(nnmoments[bf, :], np.zeros(2))
# n, t moments
for ed in range(3):
n = T.compute_scaled_normal(ed)
t = T.compute_edge_tangent(ed)
wts = np.asarray(Qline.wts)
nqpline = len(wts)
vals = AW.tabulate(0, Qline.pts, (1, ed))[(0, 0)]
ntvals = np.zeros((30, nqpline))
for i in range(30):
for j in range(len(wts)):
ntvals[i, j] = n @ vals[i, :, :, j] @ t
ntmoments = np.zeros((30, 2))
for bf in range(30):
for k in range(nqpline):
for m in (0, 1):
ntmoments[bf, m] += wts[k] * ntvals[bf, k] * linevals[m, k]
for bf in range(30):
if bf != AW.dual.entity_ids[1][ed][1] and bf != AW.dual.entity_ids[1][ed][3]:
assert np.allclose(ntmoments[bf, :], np.zeros(2))
# check internal dofs
Q = make_quadrature(T, 6)
qpvals = AW.tabulate(0, Q.pts)[(0, 0)]
const_moms = qpvals @ Q.wts
assert np.allclose(const_moms[:21], np.zeros((21, 2, 2)))
assert np.allclose(const_moms[24:], np.zeros((6, 2, 2)))
assert np.allclose(const_moms[21:24, 0, 0], np.asarray([1, 0, 0]))
assert np.allclose(const_moms[21:24, 0, 1], np.asarray([0, 1, 0]))
assert np.allclose(const_moms[21:24, 1, 0], np.asarray([0, 1, 0]))
assert np.allclose(const_moms[21:24, 1, 1], np.asarray([0, 0, 1]))
def frob(a, b):
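# Frobenius inner product <A, B> = sum_ij A_ij * B_ij via a flattened dot.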
return a.ravel() @ b.ravel()
def test_projection():
T = ufc_simplex(2)
T.vertices = np.asarray([(0.0, 0.0), (1.0, 0.0), (0.5, 2.1)])
AW = ArnoldWinther(T, 3)
Q = make_quadrature(T, 4)
qpts = np.asarray(Q.pts)
qwts = np.asarray(Q.wts)
nqp = len(Q.wts)
nbf = 24
m = np.zeros((nbf, nbf))
b = np.zeros((nbf,))
rhs_vals = np.zeros((2, 2, nqp))
bfvals = AW.tabulate(0, qpts)[(0, 0)][:nbf, :, :, :]
for i in range(nbf):
for j in range(nbf):
for k in range(nqp):
m[i, j] += qwts[k] * frob(bfvals[i, :, :, k],
bfvals[j, :, :, k])
assert np.linalg.cond(m) < 1.e12
comps = [(0, 0), (0, 1), (1, 1)]
# loop over monomials up to degree 2
for deg in range(3):
for jj in range(deg+1):
ii = deg-jj
for comp in comps:
b[:] = 0.0
# set RHS (symmetrically) to be the monomial in
# the proper component.
rhs_vals[comp] = qpts[:, 0]**ii * qpts[:, 1]**jj
rhs_vals[tuple(reversed(comp))] = rhs_vals[comp]
for i in range(nbf):
for k in range(nqp):
b[i] += qwts[k] * frob(bfvals[i, :, :, k],
rhs_vals[:, :, k])
x = np.linalg.solve(m, b)
sol_at_qpts = np.zeros(rhs_vals.shape)
for i in range(nbf):
for k in range(nqp):
sol_at_qpts[:, :, k] += x[i] * bfvals[i, :, :, k]
diff = sol_at_qpts - rhs_vals
err = 0.0
for k in range(nqp):
err += qwts[k] * frob(diff[:, :, k], diff[:, :, k])
assert np.sqrt(err) < 1.e-12
fiat-2019.2.0~git20210419.7d418fa/test/unit/test_awnc.py 0000664 0000000 0000000 00000004706 14135323752 0022005 0 ustar 00root root 0000000 0000000 import numpy as np
from FIAT import ufc_simplex, ArnoldWintherNC, make_quadrature, expansions
def test_dofs():
line = ufc_simplex(1)
T = ufc_simplex(2)
T.vertices = np.random.rand(3, 2)
AW = ArnoldWintherNC(T, 2)
Qline = make_quadrature(line, 6)
linebfs = expansions.LineExpansionSet(line)
linevals = linebfs.tabulate(1, Qline.pts)
# n, n moments
for ed in range(3):
n = T.compute_scaled_normal(ed)
wts = np.asarray(Qline.wts)
nqpline = len(wts)
vals = AW.tabulate(0, Qline.pts, (1, ed))[(0, 0)]
nnvals = np.zeros((18, nqpline))
for i in range(18):
for j in range(len(wts)):
nnvals[i, j] = n @ vals[i, :, :, j] @ n
nnmoments = np.zeros((18, 2))
for bf in range(18):
for k in range(nqpline):
for m in (0, 1):
nnmoments[bf, m] += wts[k] * nnvals[bf, k] * linevals[m, k]
for bf in range(18):
if bf != AW.dual.entity_ids[1][ed][0] and bf != AW.dual.entity_ids[1][ed][2]:
assert np.allclose(nnmoments[bf, :], np.zeros(2))
# n, t moments
for ed in range(3):
n = T.compute_scaled_normal(ed)
t = T.compute_edge_tangent(ed)
wts = np.asarray(Qline.wts)
nqpline = len(wts)
vals = AW.tabulate(0, Qline.pts, (1, ed))[(0, 0)]
ntvals = np.zeros((18, nqpline))
for i in range(18):
for j in range(len(wts)):
ntvals[i, j] = n @ vals[i, :, :, j] @ t
ntmoments = np.zeros((18, 2))
for bf in range(18):
for k in range(nqpline):
for m in (0, 1):
ntmoments[bf, m] += wts[k] * ntvals[bf, k] * linevals[m, k]
for bf in range(18):
if bf != AW.dual.entity_ids[1][ed][1] and bf != AW.dual.entity_ids[1][ed][3]:
assert np.allclose(ntmoments[bf, :], np.zeros(2), atol=1.e-7)
# check internal dofs
Q = make_quadrature(T, 6)
qpvals = AW.tabulate(0, Q.pts)[(0, 0)]
const_moms = qpvals @ Q.wts
assert np.allclose(const_moms[:12], np.zeros((12, 2, 2)))
assert np.allclose(const_moms[15:], np.zeros((3, 2, 2)))
assert np.allclose(const_moms[12:15, 0, 0], np.asarray([1, 0, 0]))
assert np.allclose(const_moms[12:15, 0, 1], np.asarray([0, 1, 0]))
assert np.allclose(const_moms[12:15, 1, 0], np.asarray([0, 1, 0]))
assert np.allclose(const_moms[12:15, 1, 1], np.asarray([0, 0, 1]))
fiat-2019.2.0~git20210419.7d418fa/test/unit/test_bernstein.py 0000664 0000000 0000000 00000006537 14135323752 0023052 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
#
# Copyright (C) 2018 Miklós Homolya
#
# This file is part of FIAT.
#
# FIAT is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# FIAT is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General
# Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with FIAT. If not, see <http://www.gnu.org/licenses/>.
import numpy
import pytest
from FIAT.reference_element import ufc_simplex
from FIAT.bernstein import Bernstein
from FIAT.quadrature_schemes import create_quadrature
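# Hard-coded reference tabulations of the second derivatives of the cubic
# Bernstein basis on the UFC triangle: rows correspond to the 10 basis
# functions, columns to the 6 quadrature points created in the test below.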
D02 = numpy.array([
[0.65423405, 1.39160021, 0.65423405, 3.95416573, 1.39160021, 3.95416573],
[3.95416573, 3.95416573, 1.39160021, 1.39160021, 0.65423405, 0.65423405],
[0.0831321, -2.12896637, 2.64569763, -7.25409741, 1.17096531, -6.51673126],
[0., 0., 0., 0., 0., 0.],
[-7.90833147, -7.90833147, -2.78320042, -2.78320042, -1.30846811, -1.30846811],
[-2.12896637, 0.0831321, -7.25409741, 2.64569763, -6.51673126, 1.17096531],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[3.95416573, 3.95416573, 1.39160021, 1.39160021, 0.65423405, 0.65423405],
[1.39160021, 0.65423405, 3.95416573, 0.65423405, 3.95416573, 1.39160021],
])
D11 = numpy.array([
[0.65423405, 1.39160021, 0.65423405, 3.95416573, 1.39160021, 3.95416573],
[3.29993168, 2.56256552, 0.73736616, -2.56256552, -0.73736616, -3.29993168],
[0.73736616, -0.73736616, 3.29993168, -3.29993168, 2.56256552, -2.56256552],
[-3.95416573, -3.95416573, -1.39160021, -1.39160021, -0.65423405, -0.65423405],
[-4.69153189, -3.21679958, -4.69153189, 1.90833147, -3.21679958, 1.90833147],
[-1.39160021, -0.65423405, -3.95416573, -0.65423405, -3.95416573, -1.39160021],
[0., 0., 0., 0., 0., 0.],
[3.95416573, 3.95416573, 1.39160021, 1.39160021, 0.65423405, 0.65423405],
[1.39160021, 0.65423405, 3.95416573, 0.65423405, 3.95416573, 1.39160021],
[0., 0., 0., 0., 0., 0.],
])
D20 = numpy.array([
[0.65423405, 1.39160021, 0.65423405, 3.95416573, 1.39160021, 3.95416573],
[2.64569763, 1.17096531, 0.0831321, -6.51673126, -2.12896637, -7.25409741],
[1.39160021, 0.65423405, 3.95416573, 0.65423405, 3.95416573, 1.39160021],
[-7.25409741, -6.51673126, -2.12896637, 1.17096531, 0.0831321, 2.64569763],
[-2.78320042, -1.30846811, -7.90833147, -1.30846811, -7.90833147, -2.78320042],
[0., 0., 0., 0., 0., 0.],
[3.95416573, 3.95416573, 1.39160021, 1.39160021, 0.65423405, 0.65423405],
[1.39160021, 0.65423405, 3.95416573, 0.65423405, 3.95416573, 1.39160021],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
])
def test_bernstein_2nd_derivatives():
ref_el = ufc_simplex(2)
degree = 3
elem = Bernstein(ref_el, degree)
rule = create_quadrature(ref_el, degree)
points = rule.get_points()
actual = elem.tabulate(2, points)
assert numpy.allclose(D02, actual[(0, 2)])
assert numpy.allclose(D11, actual[(1, 1)])
assert numpy.allclose(D20, actual[(2, 0)])
if __name__ == '__main__':
import os
pytest.main(os.path.abspath(__file__))
fiat-2019.2.0~git20210419.7d418fa/test/unit/test_discontinuous_pc.py 0000664 0000000 0000000 00000003433 14135323752 0024441 0 ustar 00root root 0000000 0000000 # Copyright (C) 2018 Imperial College London and others
#
# This file is part of FIAT.
#
# FIAT is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FIAT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FIAT. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
#
# David Ham and Cyrus Cheng
import pytest
import numpy as np
@pytest.mark.parametrize("dim, degree", [(dim, degree)
for dim in range(1, 4)
for degree in range(6)])
def test_basis_values(dim, degree):
"""Ensure that integrating a simple monomial produces the expected results."""
from FIAT import ufc_cell, make_quadrature
from FIAT.discontinuous_pc import DPC
cell = np.array([None, 'interval', 'quadrilateral', 'hexahedron'])
s = ufc_cell(cell[dim])
q = make_quadrature(s, degree + 1)
fe = DPC(s, degree)
tab = fe.tabulate(0, q.pts)[(0,) * dim]
for test_degree in range(degree + 1):
coefs = [n(lambda x: x[0]**test_degree) for n in fe.dual.nodes]
integral = float(np.dot(coefs, np.dot(tab, q.wts)))
reference = np.dot([x[0]**test_degree
for x in q.pts], q.wts)
assert np.isclose(integral, reference, rtol=1e-14)
if __name__ == '__main__':
import os
pytest.main(os.path.abspath(__file__))
fiat-2019.2.0~git20210419.7d418fa/test/unit/test_discontinuous_taylor.py 0000664 0000000 0000000 00000003277 14135323752 0025357 0 ustar 00root root 0000000 0000000 # Copyright (C) 2016 Imperial College London and others
#
# This file is part of FIAT.
#
# FIAT is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FIAT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FIAT. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
#
# David Ham
import pytest
import numpy as np
@pytest.mark.parametrize("dim, degree", [(dim, degree)
for dim in range(1, 4)
for degree in range(4)])
def test_basis_values(dim, degree):
"""Ensure that integrating a simple monomial produces the expected results."""
from FIAT import ufc_simplex, DiscontinuousTaylor, make_quadrature
s = ufc_simplex(dim)
q = make_quadrature(s, degree + 1)
fe = DiscontinuousTaylor(s, degree)
tab = fe.tabulate(0, q.pts)[(0,) * dim]
for test_degree in range(degree + 1):
coefs = [n(lambda x: x[0]**test_degree) for n in fe.dual.nodes]
integral = float(np.dot(coefs, np.dot(tab, q.wts)))
reference = np.dot([x[0]**test_degree
for x in q.pts], q.wts)
assert np.isclose(integral, reference, rtol=1e-14)
if __name__ == '__main__':
import os
pytest.main(os.path.abspath(__file__))
fiat-2019.2.0~git20210419.7d418fa/test/unit/test_facet_support_dofs.py 0000664 0000000 0000000 00000016535 14135323752 0024751 0 ustar 00root root 0000000 0000000 # Copyright (C) 2016 Miklos Homolya
#
# This file is part of FIAT.
#
# FIAT is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FIAT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FIAT. If not, see <http://www.gnu.org/licenses/>.
import pytest
import FIAT
from FIAT.reference_element import UFCInterval, UFCTriangle
from FIAT.finite_element import entity_support_dofs
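# entity_support_dofs(element, entity_dim) maps each entity of the given
# dimension to the dofs whose basis functions are (potentially) nonzero on
# it; the parametrized tables below are the expected maps.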
@pytest.mark.parametrize(('base', 'extr', 'horiz_expected', 'vert_expected'),
[(("Discontinuous Lagrange", 0), ("Discontinuous Lagrange", 0),
{0: [0], 1: [0]},
{0: [0], 1: [0]}),
(("Discontinuous Lagrange", 1), ("Discontinuous Lagrange", 1),
{0: [0, 2], 1: [1, 3]},
{0: [0, 1], 1: [2, 3]}),
(("Lagrange", 1), ("Lagrange", 1),
{0: [0, 2], 1: [1, 3]},
{0: [0, 1], 1: [2, 3]}),
(("Discontinuous Lagrange", 0), ("Lagrange", 1),
{0: [0], 1: [1]},
{0: [0, 1], 1: [0, 1]}),
(("Lagrange", 1), ("Discontinuous Lagrange", 0),
{0: [0, 1], 1: [0, 1]},
{0: [0], 1: [1]})])
def test_quad(base, extr, horiz_expected, vert_expected):
elem_A = FIAT.supported_elements[base[0]](UFCInterval(), base[1])
elem_B = FIAT.supported_elements[extr[0]](UFCInterval(), extr[1])
elem = FIAT.TensorProductElement(elem_A, elem_B)
assert horiz_expected == entity_support_dofs(elem, (1, 0))
assert vert_expected == entity_support_dofs(elem, (0, 1))
def test_quad_rtce():
W0_h = FIAT.Lagrange(UFCInterval(), 1)
W1_h = FIAT.DiscontinuousLagrange(UFCInterval(), 0)
W0_v = FIAT.DiscontinuousLagrange(UFCInterval(), 0)
W0 = FIAT.Hcurl(FIAT.TensorProductElement(W0_h, W0_v))
W1_v = FIAT.Lagrange(UFCInterval(), 1)
W1 = FIAT.Hcurl(FIAT.TensorProductElement(W1_h, W1_v))
elem = FIAT.EnrichedElement(W0, W1)
assert {0: [0, 1, 2], 1: [0, 1, 3]} == entity_support_dofs(elem, (1, 0))
assert {0: [0, 2, 3], 1: [1, 2, 3]} == entity_support_dofs(elem, (0, 1))
def test_quad_rtcf():
W0_h = FIAT.Lagrange(UFCInterval(), 1)
W1_h = FIAT.DiscontinuousLagrange(UFCInterval(), 0)
W0_v = FIAT.DiscontinuousLagrange(UFCInterval(), 0)
W0 = FIAT.Hdiv(FIAT.TensorProductElement(W0_h, W0_v))
W1_v = FIAT.Lagrange(UFCInterval(), 1)
W1 = FIAT.Hdiv(FIAT.TensorProductElement(W1_h, W1_v))
elem = FIAT.EnrichedElement(W0, W1)
assert {0: [0, 1, 2], 1: [0, 1, 3]} == entity_support_dofs(elem, (1, 0))
assert {0: [0, 2, 3], 1: [1, 2, 3]} == entity_support_dofs(elem, (0, 1))
@pytest.mark.parametrize(('base', 'extr', 'horiz_expected', 'vert_expected'),
[(("Discontinuous Lagrange", 0), ("Discontinuous Lagrange", 0),
{0: [0], 1: [0]},
{0: [0], 1: [0], 2: [0]}),
(("Discontinuous Lagrange", 1), ("Discontinuous Lagrange", 1),
{0: [0, 2, 4], 1: [1, 3, 5]},
{0: [2, 3, 4, 5], 1: [0, 1, 4, 5], 2: [0, 1, 2, 3]}),
(("Lagrange", 1), ("Lagrange", 1),
{0: [0, 2, 4], 1: [1, 3, 5]},
{0: [2, 3, 4, 5], 1: [0, 1, 4, 5], 2: [0, 1, 2, 3]}),
(("Discontinuous Lagrange", 0), ("Lagrange", 1),
{0: [0], 1: [1]},
{0: [0, 1], 1: [0, 1], 2: [0, 1]}),
(("Lagrange", 1), ("Discontinuous Lagrange", 0),
{0: [0, 1, 2], 1: [0, 1, 2]},
{0: [1, 2], 1: [0, 2], 2: [0, 1]})])
def test_prism(base, extr, horiz_expected, vert_expected):
elem_A = FIAT.supported_elements[base[0]](UFCTriangle(), base[1])
elem_B = FIAT.supported_elements[extr[0]](UFCInterval(), extr[1])
elem = FIAT.TensorProductElement(elem_A, elem_B)
assert horiz_expected == entity_support_dofs(elem, (2, 0))
assert vert_expected == entity_support_dofs(elem, (1, 1))
@pytest.mark.parametrize(('space', 'degree', 'horiz_expected', 'vert_expected'),
[("Raviart-Thomas", 1,
{0: [0, 1, 2, 3], 1: [0, 1, 2, 4]},
{0: list(range(5)), 1: list(range(5)), 2: list(range(5))}),
("Brezzi-Douglas-Marini", 1,
{0: [0, 1, 2, 3, 4, 5, 6], 1: [0, 1, 2, 3, 4, 5, 7]},
{0: list(range(8)), 1: list(range(8)), 2: list(range(8))})])
def test_prism_hdiv(space, degree, horiz_expected, vert_expected):
W0_h = FIAT.supported_elements[space](UFCTriangle(), degree)
W1_h = FIAT.DiscontinuousLagrange(UFCTriangle(), degree - 1)
W0_v = FIAT.DiscontinuousLagrange(UFCInterval(), degree - 1)
W0 = FIAT.Hdiv(FIAT.TensorProductElement(W0_h, W0_v))
W1_v = FIAT.Lagrange(UFCInterval(), degree)
W1 = FIAT.Hdiv(FIAT.TensorProductElement(W1_h, W1_v))
elem = FIAT.EnrichedElement(W0, W1)
assert horiz_expected == entity_support_dofs(elem, (2, 0))
assert vert_expected == entity_support_dofs(elem, (1, 1))
@pytest.mark.parametrize(('space', 'degree', 'horiz_expected', 'vert_expected'),
[("Raviart-Thomas", 1,
{0: [0, 1, 2, 3, 5, 7], 1: [0, 1, 2, 4, 6, 8]},
{0: [1, 2] + list(range(3, 9)),
1: [0, 2] + list(range(3, 9)),
2: [0, 1] + list(range(3, 9))}),
("Brezzi-Douglas-Marini", 1,
{0: list(range(3)) + list(range(3, 15, 2)),
1: list(range(3)) + list(range(4, 15, 2))},
{0: [1, 2] + list(range(3, 15)),
1: [0, 2] + list(range(3, 15)),
2: [0, 1] + list(range(3, 15))})])
def test_prism_hcurl(space, degree, horiz_expected, vert_expected):
W0_h = FIAT.Lagrange(UFCTriangle(), degree)
W1_h = FIAT.supported_elements[space](UFCTriangle(), degree)
W0_v = FIAT.DiscontinuousLagrange(UFCInterval(), degree - 1)
W0 = FIAT.Hcurl(FIAT.TensorProductElement(W0_h, W0_v))
W1_v = FIAT.Lagrange(UFCInterval(), degree)
W1 = FIAT.Hcurl(FIAT.TensorProductElement(W1_h, W1_v))
elem = FIAT.EnrichedElement(W0, W1)
assert horiz_expected == entity_support_dofs(elem, (2, 0))
assert vert_expected == entity_support_dofs(elem, (1, 1))
def test_discontinuous_element():
elem = FIAT.DiscontinuousElement(FIAT.Lagrange(UFCTriangle(), 3))
assert entity_support_dofs(elem, 1) == {0: [1, 2, 3, 4],
1: [0, 2, 5, 6],
2: [0, 1, 7, 8]}
if __name__ == '__main__':
import os
pytest.main(os.path.abspath(__file__))
fiat-2019.2.0~git20210419.7d418fa/test/unit/test_fiat.py 0000664 0000000 0000000 00000046717 14135323752 0022010 0 ustar 00root root 0000000 0000000 # Copyright (C) 2015-2016 Jan Blechta
#
# This file is part of FIAT.
#
# FIAT is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FIAT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FIAT. If not, see <http://www.gnu.org/licenses/>.
import random
import numpy as np
import pytest
from FIAT.reference_element import LINE, ReferenceElement
from FIAT.reference_element import Point, UFCInterval, UFCTriangle, UFCTetrahedron
from FIAT.lagrange import Lagrange
from FIAT.discontinuous_lagrange import DiscontinuousLagrange # noqa: F401
from FIAT.discontinuous_taylor import DiscontinuousTaylor # noqa: F401
from FIAT.P0 import P0 # noqa: F401
from FIAT.crouzeix_raviart import CrouzeixRaviart # noqa: F401
from FIAT.raviart_thomas import RaviartThomas # noqa: F401
from FIAT.discontinuous_raviart_thomas import DiscontinuousRaviartThomas # noqa: F401
from FIAT.brezzi_douglas_marini import BrezziDouglasMarini # noqa: F401
from FIAT.mixed import MixedElement
from FIAT.nedelec import Nedelec # noqa: F401
from FIAT.nedelec_second_kind import NedelecSecondKind # noqa: F401
from FIAT.regge import Regge # noqa: F401
from FIAT.hdiv_trace import HDivTrace, map_to_reference_facet # noqa: F401
from FIAT.hellan_herrmann_johnson import HellanHerrmannJohnson # noqa: F401
from FIAT.brezzi_douglas_fortin_marini import BrezziDouglasFortinMarini # noqa: F401
from FIAT.gauss_legendre import GaussLegendre # noqa: F401
from FIAT.gauss_lobatto_legendre import GaussLobattoLegendre # noqa: F401
from FIAT.restricted import RestrictedElement # noqa: F401
from FIAT.tensor_product import TensorProductElement # noqa: F401
from FIAT.tensor_product import FlattenedDimensions # noqa: F401
from FIAT.hdivcurl import Hdiv, Hcurl # noqa: F401
from FIAT.argyris import Argyris, QuinticArgyris # noqa: F401
from FIAT.hermite import CubicHermite # noqa: F401
from FIAT.morley import Morley # noqa: F401
from FIAT.bubble import Bubble
from FIAT.enriched import EnrichedElement # noqa: F401
from FIAT.nodal_enriched import NodalEnrichedElement
P = Point()
I = UFCInterval() # noqa: E741
T = UFCTriangle()
S = UFCTetrahedron()
def test_basis_derivatives_scaling():
"Regression test for issue #9"
class Interval(ReferenceElement):
def __init__(self, a, b):
verts = ((a,), (b,))
edges = {0: (0, 1)}
topology = {0: {0: (0,), 1: (1,)},
1: edges}
super(Interval, self).__init__(LINE, verts, topology)
random.seed(42)
for i in range(26):
a = 1000.0*(random.random() - 0.5)
b = 1000.0*(random.random() - 0.5)
a, b = min(a, b), max(a, b)
interval = Interval(a, b)
element = Lagrange(interval, 1)
points = [(a,), (0.5*(a+b),), (b,)]
tab = element.get_nodal_basis().tabulate(points, 2)
# first basis function
assert np.isclose(tab[(0,)][0][0], 1.0)
assert np.isclose(tab[(0,)][0][1], 0.5)
assert np.isclose(tab[(0,)][0][2], 0.0)
# second basis function
assert np.isclose(tab[(0,)][1][0], 0.0)
assert np.isclose(tab[(0,)][1][1], 0.5)
assert np.isclose(tab[(0,)][1][2], 1.0)
# first and second derivatives
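# On an affine interval [a, b] the two linear hat functions have constant
# slopes -1/(b - a) and +1/(b - a), and vanishing second derivatives.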
D = 1.0 / (b - a)
for p in range(len(points)):
assert np.isclose(tab[(1,)][0][p], -D)
assert np.isclose(tab[(1,)][1][p], +D)
assert np.isclose(tab[(2,)][0][p], 0.0)
assert np.isclose(tab[(2,)][1][p], 0.0)
xfail_impl = lambda element: pytest.param(element, marks=pytest.mark.xfail(strict=True, raises=NotImplementedError))
elements = [
"Lagrange(I, 1)",
"Lagrange(I, 2)",
"Lagrange(I, 3)",
"Lagrange(T, 1)",
"Lagrange(T, 2)",
"Lagrange(T, 3)",
"Lagrange(S, 1)",
"Lagrange(S, 2)",
"Lagrange(S, 3)",
"P0(I)",
"P0(T)",
"P0(S)",
"DiscontinuousLagrange(P, 0)",
"DiscontinuousLagrange(I, 0)",
"DiscontinuousLagrange(I, 1)",
"DiscontinuousLagrange(I, 2)",
"DiscontinuousLagrange(T, 0)",
"DiscontinuousLagrange(T, 1)",
"DiscontinuousLagrange(T, 2)",
"DiscontinuousLagrange(S, 0)",
"DiscontinuousLagrange(S, 1)",
"DiscontinuousLagrange(S, 2)",
"DiscontinuousTaylor(I, 0)",
"DiscontinuousTaylor(I, 1)",
"DiscontinuousTaylor(I, 2)",
"DiscontinuousTaylor(T, 0)",
"DiscontinuousTaylor(T, 1)",
"DiscontinuousTaylor(T, 2)",
"DiscontinuousTaylor(S, 0)",
"DiscontinuousTaylor(S, 1)",
"DiscontinuousTaylor(S, 2)",
"CrouzeixRaviart(I, 1)",
"CrouzeixRaviart(T, 1)",
"CrouzeixRaviart(S, 1)",
"RaviartThomas(T, 1)",
"RaviartThomas(T, 2)",
"RaviartThomas(T, 3)",
"RaviartThomas(S, 1)",
"RaviartThomas(S, 2)",
"RaviartThomas(S, 3)",
'RaviartThomas(T, 1, variant="integral")',
'RaviartThomas(T, 2, variant="integral")',
'RaviartThomas(T, 3, variant="integral")',
'RaviartThomas(S, 1, variant="integral")',
'RaviartThomas(S, 2, variant="integral")',
'RaviartThomas(S, 3, variant="integral")',
'RaviartThomas(T, 1, variant="integral(2)")',
'RaviartThomas(T, 2, variant="integral(3)")',
'RaviartThomas(T, 3, variant="integral(4)")',
'RaviartThomas(S, 1, variant="integral(2)")',
'RaviartThomas(S, 2, variant="integral(3)")',
'RaviartThomas(S, 3, variant="integral(4)")',
'RaviartThomas(T, 1, variant="point")',
'RaviartThomas(T, 2, variant="point")',
'RaviartThomas(T, 3, variant="point")',
'RaviartThomas(S, 1, variant="point")',
'RaviartThomas(S, 2, variant="point")',
'RaviartThomas(S, 3, variant="point")',
"DiscontinuousRaviartThomas(T, 1)",
"DiscontinuousRaviartThomas(T, 2)",
"DiscontinuousRaviartThomas(T, 3)",
"DiscontinuousRaviartThomas(S, 1)",
"DiscontinuousRaviartThomas(S, 2)",
"DiscontinuousRaviartThomas(S, 3)",
"BrezziDouglasMarini(T, 1)",
"BrezziDouglasMarini(T, 2)",
"BrezziDouglasMarini(T, 3)",
"BrezziDouglasMarini(S, 1)",
"BrezziDouglasMarini(S, 2)",
"BrezziDouglasMarini(S, 3)",
'BrezziDouglasMarini(T, 1, variant="integral")',
'BrezziDouglasMarini(T, 2, variant="integral")',
'BrezziDouglasMarini(T, 3, variant="integral")',
'BrezziDouglasMarini(S, 1, variant="integral")',
'BrezziDouglasMarini(S, 2, variant="integral")',
'BrezziDouglasMarini(S, 3, variant="integral")',
'BrezziDouglasMarini(T, 1, variant="integral(2)")',
'BrezziDouglasMarini(T, 2, variant="integral(3)")',
'BrezziDouglasMarini(T, 3, variant="integral(4)")',
'BrezziDouglasMarini(S, 1, variant="integral(2)")',
'BrezziDouglasMarini(S, 2, variant="integral(3)")',
'BrezziDouglasMarini(S, 3, variant="integral(4)")',
'BrezziDouglasMarini(T, 1, variant="point")',
'BrezziDouglasMarini(T, 2, variant="point")',
'BrezziDouglasMarini(T, 3, variant="point")',
'BrezziDouglasMarini(S, 1, variant="point")',
'BrezziDouglasMarini(S, 2, variant="point")',
'BrezziDouglasMarini(S, 3, variant="point")',
"Nedelec(T, 1)",
"Nedelec(T, 2)",
"Nedelec(T, 3)",
"Nedelec(S, 1)",
"Nedelec(S, 2)",
"Nedelec(S, 3)",
'Nedelec(T, 1, variant="integral")',
'Nedelec(T, 2, variant="integral")',
'Nedelec(T, 3, variant="integral")',
'Nedelec(S, 1, variant="integral")',
'Nedelec(S, 2, variant="integral")',
'Nedelec(S, 3, variant="integral")',
'Nedelec(T, 1, variant="integral(2)")',
'Nedelec(T, 2, variant="integral(3)")',
'Nedelec(T, 3, variant="integral(4)")',
'Nedelec(S, 1, variant="integral(2)")',
'Nedelec(S, 2, variant="integral(3)")',
'Nedelec(S, 3, variant="integral(4)")',
'Nedelec(T, 1, variant="point")',
'Nedelec(T, 2, variant="point")',
'Nedelec(T, 3, variant="point")',
'Nedelec(S, 1, variant="point")',
'Nedelec(S, 2, variant="point")',
'Nedelec(S, 3, variant="point")',
"NedelecSecondKind(T, 1)",
"NedelecSecondKind(T, 2)",
"NedelecSecondKind(T, 3)",
"NedelecSecondKind(S, 1)",
"NedelecSecondKind(S, 2)",
"NedelecSecondKind(S, 3)",
'NedelecSecondKind(T, 1, variant="integral")',
'NedelecSecondKind(T, 2, variant="integral")',
'NedelecSecondKind(T, 3, variant="integral")',
'NedelecSecondKind(S, 1, variant="integral")',
'NedelecSecondKind(S, 2, variant="integral")',
'NedelecSecondKind(S, 3, variant="integral")',
'NedelecSecondKind(T, 1, variant="integral(2)")',
'NedelecSecondKind(T, 2, variant="integral(3)")',
'NedelecSecondKind(T, 3, variant="integral(4)")',
'NedelecSecondKind(S, 1, variant="integral(2)")',
'NedelecSecondKind(S, 2, variant="integral(3)")',
'NedelecSecondKind(S, 3, variant="integral(4)")',
'NedelecSecondKind(T, 1, variant="point")',
'NedelecSecondKind(T, 2, variant="point")',
'NedelecSecondKind(T, 3, variant="point")',
'NedelecSecondKind(S, 1, variant="point")',
'NedelecSecondKind(S, 2, variant="point")',
'NedelecSecondKind(S, 3, variant="point")',
"Regge(T, 0)",
"Regge(T, 1)",
"Regge(T, 2)",
"Regge(S, 0)",
"Regge(S, 1)",
"Regge(S, 2)",
"HellanHerrmannJohnson(T, 0)",
"HellanHerrmannJohnson(T, 1)",
"HellanHerrmannJohnson(T, 2)",
"BrezziDouglasFortinMarini(T, 2)",
"GaussLegendre(I, 0)",
"GaussLegendre(I, 1)",
"GaussLegendre(I, 2)",
"GaussLobattoLegendre(I, 1)",
"GaussLobattoLegendre(I, 2)",
"GaussLobattoLegendre(I, 3)",
"Bubble(I, 2)",
"Bubble(T, 3)",
"Bubble(S, 4)",
"RestrictedElement(Lagrange(I, 2), restriction_domain='facet')",
"RestrictedElement(Lagrange(T, 2), restriction_domain='vertex')",
"RestrictedElement(Lagrange(T, 3), restriction_domain='facet')",
"NodalEnrichedElement(Lagrange(I, 1), Bubble(I, 2))",
"NodalEnrichedElement(Lagrange(T, 1), Bubble(T, 3))",
"NodalEnrichedElement(Lagrange(S, 1), Bubble(S, 4))",
"NodalEnrichedElement("
" RaviartThomas(T, 1),"
" RestrictedElement(RaviartThomas(T, 2), restriction_domain='interior')"
")",
"NodalEnrichedElement("
" Regge(S, 1),"
" RestrictedElement(Regge(S, 2), restriction_domain='interior')"
")",
"Argyris(T, 5)",
"QuinticArgyris(T)",
"CubicHermite(I)",
"CubicHermite(T)",
"CubicHermite(S)",
"Morley(T)",
# MixedElement made of nodal elements should be nodal, but its API
# is currently just broken.
xfail_impl("MixedElement(["
" DiscontinuousLagrange(T, 1),"
" RaviartThomas(T, 2)"
"])"),
# The following elements do not bother implementing get_nodal_basis,
# so the test would need to be rewritten using tabulate
xfail_impl("TensorProductElement(DiscontinuousLagrange(I, 1), Lagrange(I, 2))"),
xfail_impl("Hdiv(TensorProductElement(DiscontinuousLagrange(I, 1), Lagrange(I, 2)))"),
xfail_impl("Hcurl(TensorProductElement(DiscontinuousLagrange(I, 1), Lagrange(I, 2)))"),
xfail_impl("HDivTrace(T, 1)"),
xfail_impl("EnrichedElement("
"Hdiv(TensorProductElement(Lagrange(I, 1), DiscontinuousLagrange(I, 0))), "
"Hdiv(TensorProductElement(DiscontinuousLagrange(I, 0), Lagrange(I, 1)))"
")"),
xfail_impl("EnrichedElement("
"Hcurl(TensorProductElement(Lagrange(I, 1), DiscontinuousLagrange(I, 0))), "
"Hcurl(TensorProductElement(DiscontinuousLagrange(I, 0), Lagrange(I, 1)))"
")"),
# The following elements are checked using tabulate
xfail_impl("HDivTrace(T, 0)"),
xfail_impl("HDivTrace(T, 1)"),
xfail_impl("HDivTrace(T, 2)"),
xfail_impl("HDivTrace(T, 3)"),
xfail_impl("HDivTrace(S, 0)"),
xfail_impl("HDivTrace(S, 1)"),
xfail_impl("HDivTrace(S, 2)"),
xfail_impl("HDivTrace(S, 3)"),
xfail_impl("TensorProductElement(Lagrange(I, 1), Lagrange(I, 1))"),
xfail_impl("TensorProductElement(Lagrange(I, 2), Lagrange(I, 2))"),
xfail_impl("TensorProductElement(TensorProductElement(Lagrange(I, 1), Lagrange(I, 1)), Lagrange(I, 1))"),
xfail_impl("TensorProductElement(TensorProductElement(Lagrange(I, 2), Lagrange(I, 2)), Lagrange(I, 2))"),
xfail_impl("FlattenedDimensions(TensorProductElement(Lagrange(I, 1), Lagrange(I, 1)))"),
xfail_impl("FlattenedDimensions(TensorProductElement(Lagrange(I, 2), Lagrange(I, 2)))"),
xfail_impl("FlattenedDimensions(TensorProductElement(FlattenedDimensions(TensorProductElement(Lagrange(I, 1), Lagrange(I, 1))), Lagrange(I, 1)))"),
xfail_impl("FlattenedDimensions(TensorProductElement(FlattenedDimensions(TensorProductElement(Lagrange(I, 2), Lagrange(I, 2))), Lagrange(I, 2)))"),
]
@pytest.mark.parametrize('element', elements)
def test_nodality(element):
"""Check that generated elements are nodal, i.e. nodes evaluated
on basis functions give Kronecker delta
"""
# Instantiate element lazily
element = eval(element)
# Fetch primal and dual basis
poly_set = element.get_nodal_basis()
dual_set = element.get_dual_set()
assert poly_set.get_reference_element() == dual_set.get_reference_element()
# Get coeffs of primal and dual bases w.r.t. expansion set
coeffs_poly = poly_set.get_coeffs()
coeffs_dual = dual_set.to_riesz(poly_set)
assert coeffs_poly.shape == coeffs_dual.shape
# Check nodality
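# to_riesz expresses each node in the same expansion set as the primal
# basis, so applying node i to basis function j reduces to a coefficient
# dot product, which must give the Kronecker delta.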
for i in range(coeffs_dual.shape[0]):
for j in range(coeffs_poly.shape[0]):
assert np.isclose(
coeffs_dual[i].flatten().dot(coeffs_poly[j].flatten()),
1.0 if i == j else 0.0
)
@pytest.mark.parametrize('elements', [
(Lagrange(I, 2), Bubble(I, 2)),
(Lagrange(T, 3), Bubble(T, 3)),
(Lagrange(S, 4), Bubble(S, 4)),
(Lagrange(I, 1), Lagrange(I, 1)),
(Lagrange(I, 1), Bubble(I, 2), Bubble(I, 2)),
])
def test_illposed_nodal_enriched(elements):
"""Check that nodal enriched element fails on ill-posed
(non-unisolvent) case
"""
with pytest.raises(np.linalg.LinAlgError):
NodalEnrichedElement(*elements)
def test_empty_bubble():
"Check that bubble of too low degree fails"
with pytest.raises(RuntimeError):
Bubble(I, 1)
with pytest.raises(RuntimeError):
Bubble(T, 2)
with pytest.raises(RuntimeError):
Bubble(S, 3)
def test_nodal_enriched_implementation():
"""Following element pair should be the same.
This might be fragile to dof reordering but works now.
"""
e0 = RaviartThomas(T, 2)
e1 = NodalEnrichedElement(
RestrictedElement(RaviartThomas(T, 2), restriction_domain='facet'),
RestrictedElement(RaviartThomas(T, 2), restriction_domain='interior')
)
for attr in ["degree",
"get_reference_element",
"entity_dofs",
"entity_closure_dofs",
"get_formdegree",
"mapping",
"num_sub_elements",
"space_dimension",
"value_shape",
"is_nodal",
]:
assert getattr(e0, attr)() == getattr(e1, attr)()
assert np.allclose(e0.get_coeffs(), e1.get_coeffs())
assert np.allclose(e0.dmats(), e1.dmats())
assert np.allclose(e0.get_dual_set().to_riesz(e0.get_nodal_basis()),
e1.get_dual_set().to_riesz(e1.get_nodal_basis()))
def test_mixed_is_nodal():
element = MixedElement([DiscontinuousLagrange(T, 1), RaviartThomas(T, 2)])
assert element.is_nodal()
def test_mixed_is_not_nodal():
element = MixedElement([
EnrichedElement(
RaviartThomas(T, 1),
RestrictedElement(RaviartThomas(T, 2), restriction_domain="interior")
),
DiscontinuousLagrange(T, 1)
])
assert not element.is_nodal()
@pytest.mark.parametrize('element', [
"TensorProductElement(Lagrange(I, 1), Lagrange(I, 1))",
"TensorProductElement(Lagrange(I, 2), Lagrange(I, 2))",
"TensorProductElement(TensorProductElement(Lagrange(I, 1), Lagrange(I, 1)), Lagrange(I, 1))",
"TensorProductElement(TensorProductElement(Lagrange(I, 2), Lagrange(I, 2)), Lagrange(I, 2))",
"FlattenedDimensions(TensorProductElement(Lagrange(I, 1), Lagrange(I, 1)))",
"FlattenedDimensions(TensorProductElement(Lagrange(I, 2), Lagrange(I, 2)))",
"FlattenedDimensions(TensorProductElement(FlattenedDimensions(TensorProductElement(Lagrange(I, 1), Lagrange(I, 1))), Lagrange(I, 1)))",
"FlattenedDimensions(TensorProductElement(FlattenedDimensions(TensorProductElement(Lagrange(I, 2), Lagrange(I, 2))), Lagrange(I, 2)))",
])
def test_nodality_tabulate(element):
"""Check that certain elements (which do no implement
get_nodal_basis) are nodal too, by tabulating at nodes
(assuming nodes are point evaluation)
"""
# Instantiate element
element = eval(element)
# Get nodes coordinates
nodes_coords = []
for node in element.dual_basis():
# Assume point evaluation
(coords, weights), = node.get_point_dict().items()
assert weights == [(1.0, ())]
nodes_coords.append(coords)
# Check nodality
for j, x in enumerate(nodes_coords):
basis, = element.tabulate(0, (x,)).values()
for i in range(len(basis)):
assert np.isclose(basis[i], 1.0 if i == j else 0.0)
@pytest.mark.parametrize('element', [
"HDivTrace(T, 0)",
"HDivTrace(T, 1)",
"HDivTrace(T, 2)",
"HDivTrace(T, 3)",
"HDivTrace(S, 0)",
"HDivTrace(S, 1)",
"HDivTrace(S, 2)",
"HDivTrace(S, 3)",
])
def test_facet_nodality_tabulate(element):
"""Check that certain elements (which do no implement get_nodal_basis)
are nodal too, by tabulating facet-wise at nodes (assuming nodes
are point evaluation)
"""
# Instantiate element
element = eval(element)
# Dof/Node coordinates and respective facet
nodes_coords = []
# Iterate over facet degrees of freedom
entity_dofs = element.dual.entity_ids
facet_dim = sorted(entity_dofs.keys())[-2]
facet_dofs = entity_dofs[facet_dim]
dofs = element.dual_basis()
vertices = element.ref_el.vertices
for (facet, indices) in facet_dofs.items():
for i in indices:
node = dofs[i]
# Assume point evaluation
(coords, weights), = node.get_point_dict().items()
assert weights == [(1.0, ())]
# Map dof coordinates to reference element due to
# HdivTrace interface peculiarity
ref_coords, = map_to_reference_facet((coords,), vertices, facet)
nodes_coords.append((facet, ref_coords))
# Check nodality
for j, (facet, x) in enumerate(nodes_coords):
basis, = element.tabulate(0, (x,), entity=(facet_dim, facet)).values()
for i in range(len(basis)):
assert np.isclose(basis[i], 1.0 if i == j else 0.0)
@pytest.mark.parametrize('element', [
'Nedelec(S, 3, variant="integral(2)")',
'NedelecSecondKind(S, 3, variant="integral(3)")'
])
def test_error_quadrature_degree(element):
with pytest.raises(ValueError):
eval(element)
if __name__ == '__main__':
import os
pytest.main(os.path.abspath(__file__))
fiat-2019.2.0~git20210419.7d418fa/test/unit/test_gauss_legendre.py 0000664 0000000 0000000 00000003027 14135323752 0024037 0 ustar 00root root 0000000 0000000 # Copyright (C) 2016 Imperial College London and others
#
# This file is part of FIAT.
#
# FIAT is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FIAT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FIAT. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
#
# David Ham
import pytest
import numpy as np
@pytest.mark.parametrize("degree", range(1, 7))
def test_gl_basis_values(degree):
"""Ensure that integrating a simple monomial produces the expected results."""
from FIAT import ufc_simplex, GaussLegendre, make_quadrature
s = ufc_simplex(1)
q = make_quadrature(s, degree + 1)
fe = GaussLegendre(s, degree)
tab = fe.tabulate(0, q.pts)[(0,)]
for test_degree in range(degree + 1):
coefs = [n(lambda x: x[0]**test_degree) for n in fe.dual.nodes]
integral = np.dot(coefs, np.dot(tab, q.wts))
reference = np.dot([x[0]**test_degree
for x in q.pts], q.wts)
assert np.allclose(integral, reference, rtol=1e-14)
if __name__ == '__main__':
import os
pytest.main(os.path.abspath(__file__))
fiat-2019.2.0~git20210419.7d418fa/test/unit/test_gauss_lobatto_legendre.py 0000664 0000000 0000000 00000003046 14135323752 0025564 0 ustar 00root root 0000000 0000000 # Copyright (C) 2016 Imperial College London and others
#
# This file is part of FIAT.
#
# FIAT is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FIAT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FIAT. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
#
# David Ham
import pytest
import numpy as np
@pytest.mark.parametrize("degree", range(1, 7))
def test_gll_basis_values(degree):
"""Ensure that integrating a simple monomial produces the expected results."""
from FIAT import ufc_simplex, GaussLobattoLegendre, make_quadrature
s = ufc_simplex(1)
q = make_quadrature(s, degree + 1)
fe = GaussLobattoLegendre(s, degree)
tab = fe.tabulate(0, q.pts)[(0,)]
for test_degree in range(degree + 1):
coefs = [n(lambda x: x[0]**test_degree) for n in fe.dual.nodes]
integral = np.dot(coefs, np.dot(tab, q.wts))
reference = np.dot([x[0]**test_degree
for x in q.pts], q.wts)
assert np.allclose(integral, reference, rtol=1e-14)
if __name__ == '__main__':
import os
pytest.main(os.path.abspath(__file__))
fiat-2019.2.0~git20210419.7d418fa/test/unit/test_gauss_radau.py 0000664 0000000 0000000 00000003066 14135323752 0023351 0 ustar 00root root 0000000 0000000 # Copyright (C) 2016 Imperial College London and others
#
# This file is part of FIAT.
#
# FIAT is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FIAT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FIAT. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
#
# Robert Kirby, based on work of David A. Ham
#
import pytest
import numpy as np
@pytest.mark.parametrize("degree", range(1, 7))
def test_gr_basis_values(degree):
"""Ensure that integrating a simple monomial produces the expected results."""
from FIAT import ufc_simplex, GaussRadau, make_quadrature
s = ufc_simplex(1)
q = make_quadrature(s, degree + 1)
fe = GaussRadau(s, degree)
tab = fe.tabulate(0, q.pts)[(0,)]
for test_degree in range(degree + 1):
coefs = [n(lambda x: x[0]**test_degree) for n in fe.dual.nodes]
integral = np.dot(coefs, np.dot(tab, q.wts))
reference = np.dot([x[0]**test_degree
for x in q.pts], q.wts)
assert np.allclose(integral, reference, rtol=1e-14)
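# Editor's sketch (hypothetical helper, not upstream): GaussRadau is a nodal
# basis, so it reproduces the constant function 1 exactly; the tabulated
# columns should therefore sum to one at every quadrature point.
def _demo_gr_partition_of_unity(degree=3):
    from FIAT import ufc_simplex, GaussRadau, make_quadrature
    s = ufc_simplex(1)
    q = make_quadrature(s, degree + 1)
    tab = GaussRadau(s, degree).tabulate(0, q.pts)[(0,)]
    assert np.allclose(tab.sum(axis=0), 1.0)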
if __name__ == '__main__':
import os
pytest.main(os.path.abspath(__file__))
fiat-2019.2.0~git20210419.7d418fa/test/unit/test_hdivtrace.py 0000664 0000000 0000000 00000012771 14135323752 0023027 0 ustar 00root root 0000000 0000000 # Copyright (C) 2016 Imperial College London and others
#
# This file is part of FIAT.
#
# FIAT is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FIAT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FIAT. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
#
# Thomas Gibson (t.gibson15@imperial.ac.uk)
import pytest
import numpy as np
@pytest.mark.parametrize("dim", (2, 3))
@pytest.mark.parametrize("degree", range(7))
def test_basis_values(dim, degree):
"""Ensure that integrating simple monomials produces the expected results
for each facet entity of the reference triangle and tetrahedron.
This test performs the trace tabulation in two ways:
(1) The entity is not specified, in which case the element uses
numerical tolerance to determine the facet id;
(2) The entity pair (dim, id) is provided, and the trace element
tabulates accordingly using the new tabulate API.
"""
from FIAT import ufc_simplex, HDivTrace, make_quadrature
ref_el = ufc_simplex(dim)
quadrule = make_quadrature(ufc_simplex(dim - 1), degree + 1)
fiat_element = HDivTrace(ref_el, degree)
facet_element = fiat_element.dg_elements[dim - 1]
nf = facet_element.space_dimension()
for facet_id in range(dim + 1):
# Tabulate without an entity pair given --- need to map to cell coordinates
cell_transform = ref_el.get_entity_transform(dim - 1, facet_id)
cell_points = np.array(list(map(cell_transform, quadrule.pts)))
ctab = fiat_element.tabulate(0, cell_points)[(0,) * dim][nf*facet_id:nf*(facet_id + 1)]
# Tabulate with entity pair provided
entity = (ref_el.get_spatial_dimension() - 1, facet_id)
etab = fiat_element.tabulate(0, quadrule.pts,
entity)[(0,) * dim][nf*facet_id:nf*(facet_id + 1)]
for test_degree in range(degree + 1):
coeffs = [n(lambda x: x[0]**test_degree)
for n in facet_element.dual.nodes]
cintegral = np.dot(coeffs, np.dot(ctab, quadrule.wts))
eintegral = np.dot(coeffs, np.dot(etab, quadrule.wts))
assert np.allclose(cintegral, eintegral, rtol=1e-14)
reference = np.dot([x[0]**test_degree
for x in quadrule.pts], quadrule.wts)
assert np.allclose(cintegral, reference, rtol=1e-14)
assert np.allclose(eintegral, reference, rtol=1e-14)
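# Editor's sketch of route (1) above (hypothetical helper, not upstream):
# facet quadrature points are pushed into cell coordinates with
# get_entity_transform before the trace element is tabulated without an
# explicit entity pair.
def _demo_facet_to_cell_points(dim=2, facet_id=0, degree=1):
    from FIAT import ufc_simplex, make_quadrature
    ref_el = ufc_simplex(dim)
    q = make_quadrature(ufc_simplex(dim - 1), degree + 1)
    transform = ref_el.get_entity_transform(dim - 1, facet_id)
    cell_points = np.array(list(map(transform, q.pts)))
    assert cell_points.shape == (len(q.pts), dim)
    return cell_points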
@pytest.mark.parametrize("degree", range(4))
def test_quad_trace(degree):
"""Test the trace element defined on a quadrilateral cell"""
from FIAT import ufc_simplex, HDivTrace, make_quadrature
from FIAT.reference_element import TensorProductCell
tpc = TensorProductCell(ufc_simplex(1), ufc_simplex(1))
fiat_element = HDivTrace(tpc, (degree, degree))
facet_elements = fiat_element.dg_elements
quadrule = make_quadrature(ufc_simplex(1), degree + 1)
for i, entity in enumerate([((0, 1), 0), ((0, 1), 1),
((1, 0), 0), ((1, 0), 1)]):
entity_dim, _ = entity
element = facet_elements[entity_dim]
nf = element.space_dimension()
tab = fiat_element.tabulate(0, quadrule.pts,
entity)[(0, 0)][nf*i:nf*(i+1)]
for test_degree in range(degree + 1):
coeffs = [n(lambda x: x[0]**test_degree)
for n in element.dual.nodes]
integral = np.dot(coeffs, np.dot(tab, quadrule.wts))
reference = np.dot([x[0]**test_degree
for x in quadrule.pts], quadrule.wts)
assert np.allclose(integral, reference, rtol=1e-14)
@pytest.mark.parametrize("dim", (2, 3))
@pytest.mark.parametrize("order", range(1, 4))
@pytest.mark.parametrize("degree", range(4))
def test_gradient_traceerror(dim, order, degree):
"""Ensure that the TraceError appears in the appropriate dict entries when
attempting to tabulate certain orders of derivatives."""
from FIAT import ufc_simplex, HDivTrace, make_quadrature
from FIAT.hdiv_trace import TraceError
fiat_element = HDivTrace(ufc_simplex(dim), degree)
pts = make_quadrature(ufc_simplex(dim - 1), degree + 1).pts
for facet_id in range(dim + 1):
tab = fiat_element.tabulate(order, pts, entity=(dim - 1, facet_id))
for key in tab.keys():
if key != (0,)*dim:
assert isinstance(tab[key], TraceError)
@pytest.mark.parametrize("dim", (2, 3))
@pytest.mark.parametrize("degree", range(4))
def test_cell_traceerror(dim, degree):
"""Ensure that the TraceError appears in all dict entries when deliberately
attempting to tabulate the cell of a trace element."""
from FIAT import ufc_simplex, HDivTrace, make_quadrature
from FIAT.hdiv_trace import TraceError
fiat_element = HDivTrace(ufc_simplex(dim), degree)
pts = make_quadrature(ufc_simplex(dim), 1).pts
tab = fiat_element.tabulate(0, pts, entity=(dim, 0))
for key in tab.keys():
assert isinstance(tab[key], TraceError)
if __name__ == '__main__':
import os
pytest.main(os.path.abspath(__file__))
fiat-2019.2.0~git20210419.7d418fa/test/unit/test_kong_mulder_veldhuizen.py 0000664 0000000 0000000 00000012474 14135323752 0025621 0 ustar 00root root 0000000 0000000 import numpy as np
import pytest
from FIAT.reference_element import UFCInterval, UFCTriangle, UFCTetrahedron
from FIAT import create_quadrature, make_quadrature, polynomial_set
from FIAT.kong_mulder_veldhuizen import KongMulderVeldhuizen as KMV
I = UFCInterval()
T = UFCTriangle()
Te = UFCTetrahedron()
@pytest.mark.parametrize("p_d", [(1, 1), (2, 3), (3, 4)])
def test_kmv_quad_tet_schemes(p_d): # noqa: W503
fct = np.math.factorial
p, d = p_d
q = create_quadrature(Te, p, "KMV")
for i in range(d + 1):
for j in range(d + 1 - i):
for k in range(d + 1 - i - j):
trueval = fct(i) * fct(j) * fct(k) / fct(i + j + k + 3)
assert (
np.abs(
trueval -
q.integrate(lambda x: x[0] ** i * x[1] ** j * x[2] ** k)
) <
1.0e-10
)
@pytest.mark.parametrize("p_d", [(1, 1), (2, 3), (3, 5), (4, 7), (5, 9)])
def test_kmv_quad_tri_schemes(p_d):
fct = np.math.factorial
p, d = p_d
q = create_quadrature(T, p, "KMV")
for i in range(d + 1):
for j in range(d + 1 - i):
trueval = fct(i) * fct(j) / fct(i + j + 2)
assert (
np.abs(trueval - q.integrate(lambda x: x[0] ** i * x[1] ** j)) < 1.0e-10
)
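# The reference values above come from the classical monomial integrals on
# the unit simplices:
#   int_T x**i * y**j dx dy = i! j! / (i + j + 2)!             (unit triangle)
#   int_K x**i * y**j * z**k dV = i! j! k! / (i + j + k + 3)!  (unit tet)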
@pytest.mark.parametrize(
"element_degree",
[(KMV(T, 1), 1), (KMV(T, 2), 2), (KMV(T, 3), 3), (KMV(T, 4), 4), (KMV(T, 5), 5)],
)
def test_Kronecker_property_tris(element_degree):
"""
Evaluating the nodal basis at the special quadrature points should
have a Kronecker property. Also checks that the basis functions
and quadrature points are given the same ordering.
"""
element, degree = element_degree
qr = create_quadrature(T, degree, scheme="KMV")
(basis,) = element.tabulate(0, qr.get_points()).values()
assert np.allclose(basis, np.eye(*basis.shape))
@pytest.mark.parametrize(
"element_degree", [(KMV(Te, 1), 1), (KMV(Te, 2), 2), (KMV(Te, 3), 3)]
)
def test_Kronecker_property_tets(element_degree):
"""
Evaluating the nodal basis at the special quadrature points should
have a Kronecker property. Also checks that the basis functions
and quadrature points are given the same ordering.
"""
element, degree = element_degree
qr = create_quadrature(Te, degree, scheme="KMV")
(basis,) = element.tabulate(0, qr.get_points()).values()
assert np.allclose(basis, np.eye(*basis.shape))
@pytest.mark.parametrize("degree", [2, 3, 4])
def test_edge_degree(degree):
"""Verify that the outer edges of a degree KMV element
are indeed of degree and the interior is of degree+1"""
# create a degree+1 polynomial
I = UFCInterval()
# an exact quad. rule for a degree+1 polynomial on the UFCinterval
qr = make_quadrature(I, degree + 1)
W = np.diag(qr.wts)
sd = I.get_spatial_dimension()
pset = polynomial_set.ONPolynomialSet(I, degree + 1, (sd,))
pset = pset.take([degree + 1])
# tabulate at the quadrature points
interval_vals = pset.tabulate(qr.get_points())[(0,)]
interval_vals = np.squeeze(interval_vals)
# create a degree KMV element (outer edges of that degree, interior of degree+1)
T = UFCTriangle()
element = KMV(T, degree)
# tabulate values on an edge of the KMV element
for e in range(3):
edge_values = element.tabulate(0, qr.get_points(), (1, e))[(0, 0)]
# degree edge should be orthogonal to degree+1 ONpoly edge values
result = edge_values @ W @ interval_vals.T
assert np.allclose(np.sum(result), 0.0)
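# Editor's note: the check above is a discrete L2 inner product. With
# W = diag(weights), the Gram matrix of two tabulated bases f, g over a
# shared quadrature rule is f @ W @ g.T; orthogonality of the degree-p edge
# trace against the degree-(p+1) Legendre mode makes every entry vanish.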
@pytest.mark.parametrize(
"element_degree",
[(KMV(T, 1), 1), (KMV(T, 2), 2), (KMV(T, 3), 3), (KMV(T, 4), 4), (KMV(T, 5), 5)],
)
def test_interpolate_monomials_tris(element_degree):
element, degree = element_degree
# ordered the same way as KMV nodes
pts = create_quadrature(T, degree, "KMV").pts
Q = make_quadrature(T, 2 * degree)
phis = element.tabulate(0, Q.pts)[0, 0]
print("deg", degree)
for i in range(degree + 1):
for j in range(degree + 1 - i):
m = lambda x: x[0] ** i * x[1] ** j
dofs = np.array([m(pt) for pt in pts])
interp = phis.T @ dofs
matqp = np.array([m(pt) for pt in Q.pts])
err = 0.0
for k in range(phis.shape[1]):
err += Q.wts[k] * (interp[k] - matqp[k]) ** 2
assert np.sqrt(err) <= 1.0e-12
@pytest.mark.parametrize(
"element_degree", [(KMV(Te, 1), 1), (KMV(Te, 2), 2), (KMV(Te, 3), 3)]
)
def test_interpolate_monomials_tets(element_degree):
element, degree = element_degree
# ordered the same way as KMV nodes
pts = create_quadrature(Te, degree, "KMV").pts
Q = make_quadrature(Te, 2 * degree)
phis = element.tabulate(0, Q.pts)[0, 0, 0]
print("deg", degree)
for i in range(degree + 1):
for j in range(degree + 1 - i):
for k in range(degree + 1 - i - j):
m = lambda x: x[0] ** i * x[1] ** j * x[2] ** k
dofs = np.array([m(pt) for pt in pts])
interp = phis.T @ dofs
matqp = np.array([m(pt) for pt in Q.pts])
err = 0.0
for kk in range(phis.shape[1]):
err += Q.wts[kk] * (interp[kk] - matqp[kk]) ** 2
assert np.sqrt(err) <= 1.0e-12
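# Editor's note: the accumulated err above is the quadrature approximation of
# the squared L2 norm ||I_h m - m||**2, with I_h the nodal interpolant onto
# the KMV space. A vectorised equivalent (sketch) would be
#   err = np.asarray(Q.wts) @ (interp - matqp) ** 2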
fiat-2019.2.0~git20210419.7d418fa/test/unit/test_mtw.py 0000664 0000000 0000000 00000002700 14135323752 0021654 0 ustar 00root root 0000000 0000000 import numpy as np
from FIAT import ufc_simplex, MardalTaiWinther, make_quadrature, expansions
def test_dofs():
line = ufc_simplex(1)
T = ufc_simplex(2)
T.vertices = np.random.rand(3, 2)
MTW = MardalTaiWinther(T, 3)
Qline = make_quadrature(line, 6)
linebfs = expansions.LineExpansionSet(line)
linevals = linebfs.tabulate(1, Qline.pts)
for ed in range(3):
n = T.compute_scaled_normal(ed)
wts = np.asarray(Qline.wts)
vals = MTW.tabulate(0, Qline.pts, (1, ed))[(0, 0)]
nvals = np.dot(np.transpose(vals, (0, 2, 1)), n)
normal_moments = np.zeros((9, 2))
for bf in range(9):
for k in range(len(Qline.wts)):
for m in (0, 1):
normal_moments[bf, m] += wts[k] * nvals[bf, k] * linevals[m, k]
right = np.zeros((9, 2))
right[3*ed, 0] = 1.0
right[3*ed+2, 1] = 1.0
assert np.allclose(normal_moments, right)
for ed in range(3):
t = T.compute_edge_tangent(ed)
wts = np.asarray(Qline.wts)
vals = MTW.tabulate(0, Qline.pts, (1, ed))[(0, 0)]
tvals = np.dot(np.transpose(vals, (0, 2, 1)), t)
tangent_moments = np.zeros(9)
for bf in range(9):
for k in range(len(Qline.wts)):
tangent_moments[bf] += wts[k] * tvals[bf, k] * linevals[0, k]
right = np.zeros(9)
right[3*ed + 1] = 1.0
assert np.allclose(tangent_moments, right)
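# Summary of the MTW dof layout verified above, per edge ed: indices 3*ed and
# 3*ed + 2 are moments of the normal component against the first two Legendre
# modes on the edge, while 3*ed + 1 is the moment of the tangential component
# against a constant -- hence the Kronecker patterns assembled in `right`.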
fiat-2019.2.0~git20210419.7d418fa/test/unit/test_pointwise_dual.py 0000664 0000000 0000000 00000001672 14135323752 0024102 0 ustar 00root root 0000000 0000000 # Copyright (C) 2020 Robert C Kirby (Baylor University)
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
import pytest
import numpy
from FIAT import (
BrezziDouglasMarini, Morley, QuinticArgyris, CubicHermite)
from FIAT.reference_element import (
UFCTriangle,
make_lattice)
from FIAT.pointwise_dual import compute_pointwise_dual as cpd
T = UFCTriangle()
@pytest.mark.parametrize("element",
[CubicHermite(T),
Morley(T),
QuinticArgyris(T),
BrezziDouglasMarini(T, 1, variant="integral")])
def test_pw_dual(element):
deg = element.degree()
ref_el = element.ref_el
poly_set = element.poly_set
pts = make_lattice(ref_el.vertices, deg)
assert numpy.allclose(element.dual.to_riesz(poly_set),
cpd(element, pts).to_riesz(poly_set))
fiat-2019.2.0~git20210419.7d418fa/test/unit/test_quadrature.py 0000664 0000000 0000000 00000020123 14135323752 0023221 0 ustar 00root root 0000000 0000000 # Copyright (C) 2015 Imperial College London and others.
#
# This file is part of FIAT.
#
# FIAT is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FIAT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FIAT. If not, see <http://www.gnu.org/licenses/>.
#
# Written by David A. Ham (david.ham@imperial.ac.uk), 2015
import numpy
import pytest
import FIAT
from FIAT.reference_element import UFCInterval, UFCTriangle, UFCTetrahedron
from FIAT.reference_element import UFCQuadrilateral, UFCHexahedron, TensorProductCell
@pytest.fixture(scope='module')
def interval():
return UFCInterval()
@pytest.fixture(scope='module')
def triangle():
return UFCTriangle()
@pytest.fixture(scope='module')
def tetrahedron():
return UFCTetrahedron()
@pytest.fixture(scope='module')
def quadrilateral():
return UFCQuadrilateral()
@pytest.fixture(scope='module')
def hexahedron():
return UFCHexahedron()
# This unified fixture enables tests parametrised over different cells.
@pytest.fixture(params=["interval",
"triangle",
"quadrilateral",
"hexahedron"])
def cell(request):
if request.param == "interval":
return UFCInterval()
elif request.param == "triangle":
return UFCTriangle()
elif request.param == "quadrilateral":
return UFCTriangle()
elif request.param == "hexahedron":
return UFCTriangle()
@pytest.fixture(scope='module')
def extr_interval():
"""Extruded interval = interval x interval"""
return TensorProductCell(UFCInterval(), UFCInterval())
@pytest.fixture(scope='module')
def extr_triangle():
"""Extruded triangle = triangle x interval"""
return TensorProductCell(UFCTriangle(), UFCInterval())
@pytest.fixture(scope='module')
def extr_quadrilateral():
"""Extruded quadrilateral = quadrilateral x interval"""
return TensorProductCell(UFCQuadrilateral(), UFCInterval())
# This unified fixture enables tests parametrised over different extruded cells.
@pytest.fixture(params=["extr_interval",
"extr_triangle",
"extr_quadrilateral"])
def extr_cell(request):
if request.param == "extr_interval":
return TensorProductCell(UFCInterval(), UFCInterval())
elif request.param == "extr_triangle":
return TensorProductCell(UFCTriangle(), UFCInterval())
elif request.param == "extr_quadrilateral":
return TensorProductCell(UFCQuadrilateral(), UFCInterval())
@pytest.fixture(params=["canonical", "default"])
def scheme(request):
return request.param
def test_invalid_quadrature_rule():
from FIAT.quadrature import QuadratureRule
with pytest.raises(ValueError):
QuadratureRule(UFCInterval(), [[0.5, 0.5]], [0.5, 0.5, 0.5])
@pytest.mark.parametrize("degree", range(8))
def test_create_quadrature_interval(interval, degree, scheme):
q = FIAT.create_quadrature(interval, degree, scheme)
assert numpy.allclose(q.integrate(lambda x: x[0]**degree), 1/(degree + 1))
@pytest.mark.parametrize("degree", range(8))
def test_create_quadrature_triangle(triangle, degree, scheme):
q = FIAT.create_quadrature(triangle, degree, scheme)
assert numpy.allclose(q.integrate(lambda x: sum(x)**degree), 1/(degree + 2))
@pytest.mark.parametrize("degree", range(8))
def test_create_quadrature_tetrahedron(tetrahedron, degree, scheme):
q = FIAT.create_quadrature(tetrahedron, degree, scheme)
assert numpy.allclose(q.integrate(lambda x: sum(x)**degree), 1/(2*degree + 6))
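# The simplex reference values follow by substituting s = sum(x):
#   triangle:    int (x+y)**d   = int_0^1 s**d * s ds      = 1 / (d + 2)
#   tetrahedron: int (x+y+z)**d = int_0^1 s**d * s**2/2 ds = 1 / (2*d + 6)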
@pytest.mark.parametrize("extrdeg", range(4))
@pytest.mark.parametrize("basedeg", range(5))
def test_create_quadrature_extr_interval(extr_interval, basedeg, extrdeg, scheme):
q = FIAT.create_quadrature(extr_interval, (basedeg, extrdeg), scheme)
assert numpy.allclose(q.integrate(lambda x: x[0]**basedeg * x[1]**extrdeg),
1/(basedeg + 1) * 1/(extrdeg + 1))
@pytest.mark.parametrize("extrdeg", range(4))
@pytest.mark.parametrize("basedeg", range(5))
def test_create_quadrature_extr_triangle(extr_triangle, basedeg, extrdeg, scheme):
q = FIAT.create_quadrature(extr_triangle, (basedeg, extrdeg), scheme)
assert numpy.allclose(q.integrate(lambda x: (x[0] + x[1])**basedeg * x[2]**extrdeg),
1/(basedeg + 2) * 1/(extrdeg + 1))
@pytest.mark.parametrize("degree", range(8))
def test_create_quadrature_quadrilateral(quadrilateral, degree, scheme):
q = FIAT.create_quadrature(quadrilateral, degree, scheme)
assert numpy.allclose(q.integrate(lambda x: sum(x)**degree),
(2**(degree + 2) - 2) / ((degree + 1)*(degree + 2)))
@pytest.mark.parametrize("degree", range(8))
def test_create_quadrature_hexahedron(hexahedron, degree, scheme):
q = FIAT.create_quadrature(hexahedron, degree, scheme)
assert numpy.allclose(q.integrate(lambda x: sum(x)**degree),
-3 * (2**(degree + 3) - 3**(degree + 2) - 1) / ((degree + 1)*(degree + 2)*(degree + 3)))
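# The hypercube references follow from iterated antiderivatives of
# (x + y [+ z])**d plus inclusion-exclusion over the corners:
#   quad: (2**(d+2) - 2) / ((d+1)*(d+2))
#   hex:  (3**(d+3) - 3*2**(d+3) + 3) / ((d+1)*(d+2)*(d+3))
#         = -3*(2**(d+3) - 3**(d+2) - 1) / ((d+1)*(d+2)*(d+3))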
@pytest.mark.parametrize("extrdeg", range(4))
@pytest.mark.parametrize("basedeg", range(5))
def test_create_quadrature_extr_quadrilateral(extr_quadrilateral, basedeg, extrdeg, scheme):
q = FIAT.create_quadrature(extr_quadrilateral, (basedeg, extrdeg), scheme)
assert numpy.allclose(q.integrate(lambda x: (x[0] + x[1])**basedeg * x[2]**extrdeg),
(2**(basedeg + 2) - 2) / ((basedeg + 1)*(basedeg + 2)) * 1/(extrdeg + 1))
def test_invalid_quadrature_degree(cell, scheme):
with pytest.raises(ValueError):
FIAT.create_quadrature(cell, -1, scheme)
def test_invalid_quadrature_degree_tensor_prod(extr_cell):
with pytest.raises(ValueError):
FIAT.create_quadrature(extr_cell, (-1, -1))
def test_tensor_product_composition(interval, triangle, extr_triangle, scheme):
degree = (4, 4)
qa = FIAT.create_quadrature(triangle, degree[0], scheme)
qb = FIAT.create_quadrature(interval, degree[1], scheme)
q = FIAT.create_quadrature(extr_triangle, degree, scheme)
assert len(q.get_points()) == len(qa.get_points())*len(qb.get_points())
@pytest.mark.parametrize(("points, degree"), tuple((p, d)
for p in range(2, 10)
for d in range(2*p - 2)))
def test_gauss_lobatto_legendre_quadrature(interval, points, degree):
"""Check that the quadrature rules correctly integrate all the right
polynomial degrees."""
q = FIAT.quadrature.GaussLobattoLegendreQuadratureLineRule(interval, points)
assert numpy.round(q.integrate(lambda x: x[0]**degree) - 1./(degree+1), 14) == 0.
@pytest.mark.parametrize(("points, degree"), tuple((p, d)
for p in range(2, 10)
for d in range(2*p - 1)))
def test_radau_legendre_quadrature(interval, points, degree):
"""Check that the quadrature rules correctly integrate all the right
polynomial degrees."""
q = FIAT.quadrature.RadauQuadratureLineRule(interval, points)
assert numpy.round(q.integrate(lambda x: x[0]**degree) - 1./(degree+1), 14) == 0.
@pytest.mark.parametrize(("points, degree"), tuple((p, d)
for p in range(2, 10)
for d in range(2*p)))
def test_gauss_legendre_quadrature(interval, points, degree):
"""Check that the quadrature rules correctly integrate all the right
polynomial degrees."""
q = FIAT.quadrature.GaussLegendreQuadratureLineRule(interval, points)
assert numpy.round(q.integrate(lambda x: x[0]**degree) - 1./(degree+1), 14) == 0.
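# The degree ranges in the three parametrisations above encode the classical
# exactness results for p-point rules: Gauss-Lobatto-Legendre integrates
# degree 2p - 3 exactly, Gauss-Radau degree 2p - 2 and Gauss-Legendre
# degree 2p - 1.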
if __name__ == '__main__':
import os
pytest.main(os.path.abspath(__file__))
fiat-2019.2.0~git20210419.7d418fa/test/unit/test_quadrature_element.py 0000664 0000000 0000000 00000003767 14135323752 0024751 0 ustar 00root root 0000000 0000000 # Copyright (C) 2017 Miklos Homolya
#
# This file is part of FIAT.
#
# FIAT is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FIAT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FIAT. If not, see <http://www.gnu.org/licenses/>.
import pytest
import numpy as np
from FIAT import QuadratureElement, make_quadrature, ufc_simplex
@pytest.fixture(params=[1, 2, 3])
def cell(request):
return ufc_simplex(request.param)
@pytest.fixture
def quadrature(cell):
return make_quadrature(cell, 2)
@pytest.fixture
def element(cell, quadrature):
return QuadratureElement(cell, quadrature.get_points())
def test_order(element, quadrature):
with pytest.raises(ValueError):
element.tabulate(1, quadrature.get_points())
def test_points(element, quadrature):
points = quadrature.get_points()
wrong_points = np.linspace(0.0, 1.0, points.size).reshape(points.shape)
with pytest.raises(AssertionError):
element.tabulate(0, wrong_points)
def test_entity(element, quadrature):
dim = element.get_reference_element().get_spatial_dimension()
points = make_quadrature(ufc_simplex(dim - 1), 2).get_points()
with pytest.raises(ValueError):
element.tabulate(0, points, entity=(dim - 1, 1))
def test_result(element, quadrature):
dim = element.get_reference_element().get_spatial_dimension()
points = quadrature.get_points()
actual = element.tabulate(0, points)[(0,) * dim]
assert np.allclose(np.eye(len(points)), actual)
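# Editor's summary: a QuadratureElement is pure point evaluation at its
# defining points, so order-0 tabulation there is the identity (test_result),
# while derivative orders, foreign point sets and facet entities are all
# rejected (test_order, test_points, test_entity).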
if __name__ == '__main__':
import os
pytest.main(os.path.abspath(__file__))
fiat-2019.2.0~git20210419.7d418fa/test/unit/test_reference_element.py 0000664 0000000 0000000 00000015700 14135323752 0024520 0 ustar 00root root 0000000 0000000 # Copyright (C) 2016 Miklos Homolya
#
# This file is part of FIAT.
#
# FIAT is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FIAT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FIAT. If not, see <http://www.gnu.org/licenses/>.
import pytest
import numpy as np
import sys
from FIAT.reference_element import UFCInterval, UFCTriangle, UFCTetrahedron
from FIAT.reference_element import Point, TensorProductCell, UFCQuadrilateral, UFCHexahedron
point = Point()
interval = UFCInterval()
triangle = UFCTriangle()
quadrilateral = UFCQuadrilateral()
hexahedron = UFCHexahedron()
tetrahedron = UFCTetrahedron()
interval_x_interval = TensorProductCell(interval, interval)
triangle_x_interval = TensorProductCell(triangle, interval)
quadrilateral_x_interval = TensorProductCell(quadrilateral, interval)
ufc_tetrahedron_21connectivity = [(0, 1, 2), (0, 3, 4), (1, 3, 5), (2, 4, 5)]
ufc_hexahedron_21connectivity = [(0, 1, 4, 5), (2, 3, 6, 7), (0, 2, 8, 9),
(1, 3, 10, 11), (4, 6, 8, 10), (5, 7, 9, 11)]
@pytest.mark.parametrize(('cell', 'connectivity'),
[(tetrahedron, ufc_tetrahedron_21connectivity),
(hexahedron, ufc_hexahedron_21connectivity),
pytest.param(triangle_x_interval, [], marks=pytest.mark.xfail),
pytest.param(quadrilateral_x_interval, [], marks=pytest.mark.xfail)])
def test_ufc_connectivity_21(cell, connectivity):
"""Check face-edge connectivity builds what UFC expects.
This is the only non-trivial case; the rest are x-0 and D-x,
see below."""
assert cell.get_connectivity()[(2, 1)] == connectivity
@pytest.mark.parametrize('cell',
[point, interval, triangle, tetrahedron,
quadrilateral, hexahedron,
pytest.param(interval_x_interval, marks=pytest.mark.xfail),
pytest.param(triangle_x_interval, marks=pytest.mark.xfail),
pytest.param(quadrilateral_x_interval, marks=pytest.mark.xfail)])
def test_ufc_connectivity_x0(cell):
"""Check x-0 connectivity is just what get_topology gives"""
for dim0 in range(cell.get_spatial_dimension()+1):
connectivity = cell.get_connectivity()[(dim0, 0)]
topology = cell.get_topology()[dim0]
assert len(connectivity) == len(topology)
assert all(connectivity[i] == t for i, t in topology.items())
@pytest.mark.parametrize('cell',
[point, interval, triangle, tetrahedron,
quadrilateral, hexahedron,
pytest.param(interval_x_interval, marks=pytest.mark.xfail),
pytest.param(triangle_x_interval, marks=pytest.mark.xfail),
pytest.param(quadrilateral_x_interval, marks=pytest.mark.xfail)])
def test_ufc_connectivity_Dx(cell):
"""Check D-x connectivity is just [(0,1,2,...)]"""
D = cell.get_spatial_dimension()
for dim1 in range(D+1):
connectivity = cell.get_connectivity()[(D, dim1)]
assert len(connectivity) == 1
assert connectivity[0] == tuple(range(len(connectivity[0])))
@pytest.mark.parametrize(('cell', 'volume'),
[pytest.param(point, 1, marks=pytest.mark.xfail(sys.version_info < (3, 6), reason="python < 3.6")),
(interval, 1),
(triangle, 1/2),
(quadrilateral, 1),
(tetrahedron, 1/6),
(interval_x_interval, 1),
(triangle_x_interval, 1/2),
(quadrilateral_x_interval, 1),
(hexahedron, 1)])
def test_volume(cell, volume):
assert np.allclose(volume, cell.volume())
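# These reference volumes follow from |unit n-simplex| = 1/n! and
# |unit n-cube| = 1, with tensor product cells multiplying the factors,
# e.g. triangle_x_interval: 1/2 * 1 = 1/2.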
@pytest.mark.parametrize(('cell', 'normals'),
[(interval, [[-1],
[1]]),
(triangle, [[1, 1],
[-1, 0],
[0, -1]]),
(quadrilateral, [[-1, 0],
[1, 0],
[0, -1],
[0, 1]]),
(tetrahedron, [[1, 1, 1],
[-1, 0, 0],
[0, -1, 0],
[0, 0, -1]]),
(hexahedron, [[-1, 0, 0],
[1, 0, 0],
[0, -1, 0],
[0, 1, 0],
[0, 0, -1],
[0, 0, 1]])])
def test_reference_normal(cell, normals):
facet_dim = cell.get_spatial_dimension() - 1
for facet_number in range(len(cell.get_topology()[facet_dim])):
assert np.allclose(normals[facet_number],
cell.compute_reference_normal(facet_dim, facet_number))
@pytest.mark.parametrize('cell',
[interval_x_interval,
triangle_x_interval,
quadrilateral_x_interval])
def test_reference_normal_horiz(cell):
dim = cell.get_spatial_dimension()
assert np.allclose((0,) * (dim - 1) + (-1,),
cell.compute_reference_normal((dim - 1, 0), 0))  # bottom facet
assert np.allclose((0,) * (dim - 1) + (1,),
cell.compute_reference_normal((dim - 1, 0), 1))  # top facet
@pytest.mark.parametrize(('cell', 'normals'),
[(interval_x_interval, [[-1, 0],
[1, 0]]),
(triangle_x_interval, [[1, 1, 0],
[-1, 0, 0],
[0, -1, 0]]),
(quadrilateral_x_interval, [[-1, 0, 0],
[1, 0, 0],
[0, -1, 0],
[0, 1, 0]])])
def test_reference_normal_vert(cell, normals):
dim = cell.get_spatial_dimension()
vert_dim = (dim - 2, 1)
for facet_number in range(len(cell.get_topology()[vert_dim])):
assert np.allclose(normals[facet_number],
cell.compute_reference_normal(vert_dim, facet_number))
if __name__ == '__main__':
import os
pytest.main(os.path.abspath(__file__))
fiat-2019.2.0~git20210419.7d418fa/test/unit/test_regge_hhj.py 0000664 0000000 0000000 00000001064 14135323752 0022771 0 ustar 00root root 0000000 0000000 from FIAT.reference_element import UFCTriangle
from FIAT import Regge, HellanHerrmannJohnson
import numpy as np
import pytest
def test_rotated_regge_is_hhj():
triangle = UFCTriangle()
R = Regge(triangle, 0)
H = HellanHerrmannJohnson(triangle, 0)
def S(u):
return np.eye(2) * np.trace(u) - u
for (r, h) in zip(R.tabulate(0, (0.2, 0.2))[(0, 0)],
H.tabulate(0, (0.2, 0.2))[(0, 0)]):
assert np.all(np.isclose(r, S(h)))
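# The map S(u) = tr(u)*I - u applied above is the pointwise rotation that
# relates the lowest-order Regge and Hellan-Herrmann-Johnson bases in 2D,
# exchanging tangential-tangential for normal-normal continuity.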
if __name__ == '__main__':
import os
pytest.main(os.path.abspath(__file__))
fiat-2019.2.0~git20210419.7d418fa/test/unit/test_serendipity.py 0000664 0000000 0000000 00000002716 14135323752 0023413 0 ustar 00root root 0000000 0000000 from FIAT.reference_element import (
UFCQuadrilateral, UFCInterval, TensorProductCell)
from FIAT import Serendipity
import numpy as np
import sympy
def test_serendipity_derivatives():
cell = UFCQuadrilateral()
S = Serendipity(cell, 2)
x = sympy.DeferredVector("X")
X, Y = x[0], x[1]
basis_functions = [
(1 - X)*(1 - Y),
Y*(1 - X),
X*(1 - Y),
X*Y,
Y*(1 - X)*(Y - 1),
X*Y*(Y - 1),
X*(1 - Y)*(X - 1),
X*Y*(X - 1),
]
points = [[0.5, 0.5], [0.25, 0.75]]
for alpha, actual in S.tabulate(2, points).items():
expect = list(sympy.diff(basis, *zip([X, Y], alpha))
for basis in basis_functions)
expect = list([basis.subs(dict(zip([X, Y], point)))
for point in points]
for basis in expect)
assert actual.shape == (8, 2)
assert np.allclose(np.asarray(expect, dtype=float),
actual.reshape(8, 2))
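# Editor's note: the first four functions above are the bilinear vertex basis
# (they sum to 1); the remaining four are edge functions, quadratic along
# their edge, completing the degree-2 serendipity space. sympy lets the test
# compare tabulated derivatives against exact symbolic ones.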
def test_dual_tensor_versus_ufc():
K0 = UFCQuadrilateral()
ell = UFCInterval()
K1 = TensorProductCell(ell, ell)
S0 = Serendipity(K0, 2)
S1 = Serendipity(K1, 2)
# since both elements go through the flattened cell to produce the
# dual basis, they ought to do *exactly* the same calculations and
# hence form exactly the same nodes.
for i in range(S0.space_dimension()):
assert S0.dual.nodes[i].pt_dict == S1.dual.nodes[i].pt_dict
fiat-2019.2.0~git20210419.7d418fa/test/unit/test_tensor_product.py 0000664 0000000 0000000 00000062331 14135323752 0024125 0 ustar 00root root 0000000 0000000 # Copyright (C) 2015-2016 Imperial College London and others
#
# This file is part of FIAT.
#
# FIAT is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FIAT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FIAT. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
#
# Andrew McRae
import pytest
import numpy as np
from FIAT.reference_element import UFCInterval, UFCTriangle
from FIAT.lagrange import Lagrange
from FIAT.discontinuous_lagrange import DiscontinuousLagrange
from FIAT.nedelec import Nedelec
from FIAT.raviart_thomas import RaviartThomas
from FIAT.tensor_product import TensorProductElement, FlattenedDimensions
from FIAT.hdivcurl import Hdiv, Hcurl
from FIAT.enriched import EnrichedElement
def test_TFE_1Dx1D_scalar():
T = UFCInterval()
P1_DG = DiscontinuousLagrange(T, 1)
P2 = Lagrange(T, 2)
elt = TensorProductElement(P1_DG, P2)
assert elt.value_shape() == ()
tab = elt.tabulate(1, [(0.1, 0.2)])
tabA = P1_DG.tabulate(1, [(0.1,)])
tabB = P2.tabulate(1, [(0.2,)])
for da, db in [[(0,), (0,)], [(1,), (0,)], [(0,), (1,)]]:
dc = da + db
assert np.isclose(tab[dc][0][0], tabA[da][0][0]*tabB[db][0][0])
assert np.isclose(tab[dc][1][0], tabA[da][0][0]*tabB[db][1][0])
assert np.isclose(tab[dc][2][0], tabA[da][0][0]*tabB[db][2][0])
assert np.isclose(tab[dc][3][0], tabA[da][1][0]*tabB[db][0][0])
assert np.isclose(tab[dc][4][0], tabA[da][1][0]*tabB[db][1][0])
assert np.isclose(tab[dc][5][0], tabA[da][1][0]*tabB[db][2][0])
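# Editor's reading of the index pattern above: TensorProductElement orders
# its basis with the first factor slowest, i.e. basis index 3*i + j pairs
# function i of P1_DG (2 functions) with function j of P2 (3 functions), and
# derivative multi-indices simply concatenate, dc = da + db.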
def test_TFE_1Dx1D_vector():
T = UFCInterval()
P1_DG = DiscontinuousLagrange(T, 1)
P2 = Lagrange(T, 2)
elt = TensorProductElement(P1_DG, P2)
hdiv_elt = Hdiv(elt)
hcurl_elt = Hcurl(elt)
assert hdiv_elt.value_shape() == (2,)
assert hcurl_elt.value_shape() == (2,)
tabA = P1_DG.tabulate(1, [(0.1,)])
tabB = P2.tabulate(1, [(0.2,)])
hdiv_tab = hdiv_elt.tabulate(1, [(0.1, 0.2)])
for da, db in [[(0,), (0,)], [(1,), (0,)], [(0,), (1,)]]:
dc = da + db
assert hdiv_tab[dc][0][0][0] == 0.0
assert hdiv_tab[dc][1][0][0] == 0.0
assert hdiv_tab[dc][2][0][0] == 0.0
assert hdiv_tab[dc][3][0][0] == 0.0
assert hdiv_tab[dc][4][0][0] == 0.0
assert hdiv_tab[dc][5][0][0] == 0.0
assert np.isclose(hdiv_tab[dc][0][1][0], tabA[da][0][0]*tabB[db][0][0])
assert np.isclose(hdiv_tab[dc][1][1][0], tabA[da][0][0]*tabB[db][1][0])
assert np.isclose(hdiv_tab[dc][2][1][0], tabA[da][0][0]*tabB[db][2][0])
assert np.isclose(hdiv_tab[dc][3][1][0], tabA[da][1][0]*tabB[db][0][0])
assert np.isclose(hdiv_tab[dc][4][1][0], tabA[da][1][0]*tabB[db][1][0])
assert np.isclose(hdiv_tab[dc][5][1][0], tabA[da][1][0]*tabB[db][2][0])
hcurl_tab = hcurl_elt.tabulate(1, [(0.1, 0.2)])
for da, db in [[(0,), (0,)], [(1,), (0,)], [(0,), (1,)]]:
dc = da + db
assert np.isclose(hcurl_tab[dc][0][0][0], tabA[da][0][0]*tabB[db][0][0])
assert np.isclose(hcurl_tab[dc][1][0][0], tabA[da][0][0]*tabB[db][1][0])
assert np.isclose(hcurl_tab[dc][2][0][0], tabA[da][0][0]*tabB[db][2][0])
assert np.isclose(hcurl_tab[dc][3][0][0], tabA[da][1][0]*tabB[db][0][0])
assert np.isclose(hcurl_tab[dc][4][0][0], tabA[da][1][0]*tabB[db][1][0])
assert np.isclose(hcurl_tab[dc][5][0][0], tabA[da][1][0]*tabB[db][2][0])
assert hcurl_tab[dc][0][1][0] == 0.0
assert hcurl_tab[dc][1][1][0] == 0.0
assert hcurl_tab[dc][2][1][0] == 0.0
assert hcurl_tab[dc][3][1][0] == 0.0
assert hcurl_tab[dc][4][1][0] == 0.0
assert hcurl_tab[dc][5][1][0] == 0.0
def test_TFE_2Dx1D_scalar_triangle():
S = UFCTriangle()
T = UFCInterval()
P1_DG = DiscontinuousLagrange(S, 1)
P2 = Lagrange(T, 2)
elt = TensorProductElement(P1_DG, P2)
assert elt.value_shape() == ()
tab = elt.tabulate(1, [(0.1, 0.2, 0.3)])
tabA = P1_DG.tabulate(1, [(0.1, 0.2)])
tabB = P2.tabulate(1, [(0.3,)])
for da, db in [[(0, 0), (0,)], [(1, 0), (0,)], [(0, 1), (0,)], [(0, 0), (1,)]]:
dc = da + db
assert np.isclose(tab[dc][0][0], tabA[da][0][0]*tabB[db][0][0])
assert np.isclose(tab[dc][1][0], tabA[da][0][0]*tabB[db][1][0])
assert np.isclose(tab[dc][2][0], tabA[da][0][0]*tabB[db][2][0])
assert np.isclose(tab[dc][3][0], tabA[da][1][0]*tabB[db][0][0])
assert np.isclose(tab[dc][4][0], tabA[da][1][0]*tabB[db][1][0])
assert np.isclose(tab[dc][5][0], tabA[da][1][0]*tabB[db][2][0])
assert np.isclose(tab[dc][6][0], tabA[da][2][0]*tabB[db][0][0])
assert np.isclose(tab[dc][7][0], tabA[da][2][0]*tabB[db][1][0])
assert np.isclose(tab[dc][8][0], tabA[da][2][0]*tabB[db][2][0])
def test_TFE_2Dx1D_scalar_quad():
T = UFCInterval()
P1 = Lagrange(T, 1)
P1_DG = DiscontinuousLagrange(T, 1)
elt = TensorProductElement(TensorProductElement(P1, P1_DG), P1)
assert elt.value_shape() == ()
tab = elt.tabulate(1, [(0.1, 0.2, 0.3)])
tA = P1.tabulate(1, [(0.1,)])
tB = P1_DG.tabulate(1, [(0.2,)])
tC = P1.tabulate(1, [(0.3,)])
for da, db, dc in [[(0,), (0,), (0,)], [(1,), (0,), (0,)], [(0,), (1,), (0,)], [(0,), (0,), (1,)]]:
dd = da + db + dc
assert np.isclose(tab[dd][0][0], tA[da][0][0]*tB[db][0][0]*tC[dc][0][0])
assert np.isclose(tab[dd][1][0], tA[da][0][0]*tB[db][0][0]*tC[dc][1][0])
assert np.isclose(tab[dd][2][0], tA[da][0][0]*tB[db][1][0]*tC[dc][0][0])
assert np.isclose(tab[dd][3][0], tA[da][0][0]*tB[db][1][0]*tC[dc][1][0])
assert np.isclose(tab[dd][4][0], tA[da][1][0]*tB[db][0][0]*tC[dc][0][0])
assert np.isclose(tab[dd][5][0], tA[da][1][0]*tB[db][0][0]*tC[dc][1][0])
assert np.isclose(tab[dd][6][0], tA[da][1][0]*tB[db][1][0]*tC[dc][0][0])
assert np.isclose(tab[dd][7][0], tA[da][1][0]*tB[db][1][0]*tC[dc][1][0])
def test_TFE_2Dx1D_scalar_triangle_hdiv():
S = UFCTriangle()
T = UFCInterval()
P1_DG = DiscontinuousLagrange(S, 1)
P2 = Lagrange(T, 2)
elt = Hdiv(TensorProductElement(P1_DG, P2))
assert elt.value_shape() == (3,)
tab = elt.tabulate(1, [(0.1, 0.2, 0.3)])
tabA = P1_DG.tabulate(1, [(0.1, 0.2)])
tabB = P2.tabulate(1, [(0.3,)])
for da, db in [[(0, 0), (0,)], [(1, 0), (0,)], [(0, 1), (0,)], [(0, 0), (1,)]]:
dc = da + db
assert tab[dc][0][0][0] == 0.0
assert tab[dc][1][0][0] == 0.0
assert tab[dc][2][0][0] == 0.0
assert tab[dc][3][0][0] == 0.0
assert tab[dc][4][0][0] == 0.0
assert tab[dc][5][0][0] == 0.0
assert tab[dc][6][0][0] == 0.0
assert tab[dc][7][0][0] == 0.0
assert tab[dc][8][0][0] == 0.0
assert tab[dc][0][1][0] == 0.0
assert tab[dc][1][1][0] == 0.0
assert tab[dc][2][1][0] == 0.0
assert tab[dc][3][1][0] == 0.0
assert tab[dc][4][1][0] == 0.0
assert tab[dc][5][1][0] == 0.0
assert tab[dc][6][1][0] == 0.0
assert tab[dc][7][1][0] == 0.0
assert tab[dc][8][1][0] == 0.0
assert np.isclose(tab[dc][0][2][0], tabA[da][0][0]*tabB[db][0][0])
assert np.isclose(tab[dc][1][2][0], tabA[da][0][0]*tabB[db][1][0])
assert np.isclose(tab[dc][2][2][0], tabA[da][0][0]*tabB[db][2][0])
assert np.isclose(tab[dc][3][2][0], tabA[da][1][0]*tabB[db][0][0])
assert np.isclose(tab[dc][4][2][0], tabA[da][1][0]*tabB[db][1][0])
assert np.isclose(tab[dc][5][2][0], tabA[da][1][0]*tabB[db][2][0])
assert np.isclose(tab[dc][6][2][0], tabA[da][2][0]*tabB[db][0][0])
assert np.isclose(tab[dc][7][2][0], tabA[da][2][0]*tabB[db][1][0])
assert np.isclose(tab[dc][8][2][0], tabA[da][2][0]*tabB[db][2][0])
def test_TFE_2Dx1D_scalar_triangle_hcurl():
S = UFCTriangle()
T = UFCInterval()
P1 = Lagrange(S, 1)
P1_DG = DiscontinuousLagrange(T, 1)
elt = Hcurl(TensorProductElement(P1, P1_DG))
assert elt.value_shape() == (3,)
tab = elt.tabulate(1, [(0.1, 0.2, 0.3)])
tabA = P1.tabulate(1, [(0.1, 0.2)])
tabB = P1_DG.tabulate(1, [(0.3,)])
for da, db in [[(0, 0), (0,)], [(1, 0), (0,)], [(0, 1), (0,)], [(0, 0), (1,)]]:
dc = da + db
assert tab[dc][0][0][0] == 0.0
assert tab[dc][1][0][0] == 0.0
assert tab[dc][2][0][0] == 0.0
assert tab[dc][3][0][0] == 0.0
assert tab[dc][4][0][0] == 0.0
assert tab[dc][5][0][0] == 0.0
assert tab[dc][0][1][0] == 0.0
assert tab[dc][1][1][0] == 0.0
assert tab[dc][2][1][0] == 0.0
assert tab[dc][3][1][0] == 0.0
assert tab[dc][4][1][0] == 0.0
assert tab[dc][5][1][0] == 0.0
assert np.isclose(tab[dc][0][2][0], tabA[da][0][0]*tabB[db][0][0])
assert np.isclose(tab[dc][1][2][0], tabA[da][0][0]*tabB[db][1][0])
assert np.isclose(tab[dc][2][2][0], tabA[da][1][0]*tabB[db][0][0])
assert np.isclose(tab[dc][3][2][0], tabA[da][1][0]*tabB[db][1][0])
assert np.isclose(tab[dc][4][2][0], tabA[da][2][0]*tabB[db][0][0])
assert np.isclose(tab[dc][5][2][0], tabA[da][2][0]*tabB[db][1][0])
def test_TFE_2Dx1D_scalar_quad_hdiv():
T = UFCInterval()
P1 = Lagrange(T, 1)
P1_DG = DiscontinuousLagrange(T, 1)
elt = Hdiv(TensorProductElement(TensorProductElement(P1_DG, P1_DG), P1))
assert elt.value_shape() == (3,)
tab = elt.tabulate(1, [(0.1, 0.2, 0.3)])
tA = P1_DG.tabulate(1, [(0.1,)])
tB = P1_DG.tabulate(1, [(0.2,)])
tC = P1.tabulate(1, [(0.3,)])
for da, db, dc in [[(0,), (0,), (0,)], [(1,), (0,), (0,)], [(0,), (1,), (0,)], [(0,), (0,), (1,)]]:
dd = da + db + dc
assert tab[dd][0][0][0] == 0.0
assert tab[dd][1][0][0] == 0.0
assert tab[dd][2][0][0] == 0.0
assert tab[dd][3][0][0] == 0.0
assert tab[dd][4][0][0] == 0.0
assert tab[dd][5][0][0] == 0.0
assert tab[dd][6][0][0] == 0.0
assert tab[dd][7][0][0] == 0.0
assert tab[dd][0][1][0] == 0.0
assert tab[dd][1][1][0] == 0.0
assert tab[dd][2][1][0] == 0.0
assert tab[dd][3][1][0] == 0.0
assert tab[dd][4][1][0] == 0.0
assert tab[dd][5][1][0] == 0.0
assert tab[dd][6][1][0] == 0.0
assert tab[dd][7][1][0] == 0.0
assert np.isclose(tab[dd][0][2][0], tA[da][0][0]*tB[db][0][0]*tC[dc][0][0])
assert np.isclose(tab[dd][1][2][0], tA[da][0][0]*tB[db][0][0]*tC[dc][1][0])
assert np.isclose(tab[dd][2][2][0], tA[da][0][0]*tB[db][1][0]*tC[dc][0][0])
assert np.isclose(tab[dd][3][2][0], tA[da][0][0]*tB[db][1][0]*tC[dc][1][0])
assert np.isclose(tab[dd][4][2][0], tA[da][1][0]*tB[db][0][0]*tC[dc][0][0])
assert np.isclose(tab[dd][5][2][0], tA[da][1][0]*tB[db][0][0]*tC[dc][1][0])
assert np.isclose(tab[dd][6][2][0], tA[da][1][0]*tB[db][1][0]*tC[dc][0][0])
assert np.isclose(tab[dd][7][2][0], tA[da][1][0]*tB[db][1][0]*tC[dc][1][0])
def test_TFE_2Dx1D_scalar_quad_hcurl():
T = UFCInterval()
P1 = Lagrange(T, 1)
P1_DG = DiscontinuousLagrange(T, 1)
elt = Hcurl(TensorProductElement(TensorProductElement(P1, P1), P1_DG))
assert elt.value_shape() == (3,)
tab = elt.tabulate(1, [(0.1, 0.2, 0.3)])
tA = P1.tabulate(1, [(0.1,)])
tB = P1.tabulate(1, [(0.2,)])
tC = P1_DG.tabulate(1, [(0.3,)])
for da, db, dc in [[(0,), (0,), (0,)], [(1,), (0,), (0,)], [(0,), (1,), (0,)], [(0,), (0,), (1,)]]:
dd = da + db + dc
assert tab[dd][0][0][0] == 0.0
assert tab[dd][1][0][0] == 0.0
assert tab[dd][2][0][0] == 0.0
assert tab[dd][3][0][0] == 0.0
assert tab[dd][4][0][0] == 0.0
assert tab[dd][5][0][0] == 0.0
assert tab[dd][6][0][0] == 0.0
assert tab[dd][7][0][0] == 0.0
assert tab[dd][0][1][0] == 0.0
assert tab[dd][1][1][0] == 0.0
assert tab[dd][2][1][0] == 0.0
assert tab[dd][3][1][0] == 0.0
assert tab[dd][4][1][0] == 0.0
assert tab[dd][5][1][0] == 0.0
assert tab[dd][6][1][0] == 0.0
assert tab[dd][7][1][0] == 0.0
assert np.isclose(tab[dd][0][2][0], tA[da][0][0]*tB[db][0][0]*tC[dc][0][0])
assert np.isclose(tab[dd][1][2][0], tA[da][0][0]*tB[db][0][0]*tC[dc][1][0])
assert np.isclose(tab[dd][2][2][0], tA[da][0][0]*tB[db][1][0]*tC[dc][0][0])
assert np.isclose(tab[dd][3][2][0], tA[da][0][0]*tB[db][1][0]*tC[dc][1][0])
assert np.isclose(tab[dd][4][2][0], tA[da][1][0]*tB[db][0][0]*tC[dc][0][0])
assert np.isclose(tab[dd][5][2][0], tA[da][1][0]*tB[db][0][0]*tC[dc][1][0])
assert np.isclose(tab[dd][6][2][0], tA[da][1][0]*tB[db][1][0]*tC[dc][0][0])
assert np.isclose(tab[dd][7][2][0], tA[da][1][0]*tB[db][1][0]*tC[dc][1][0])
def test_TFE_2Dx1D_vector_triangle_hdiv():
S = UFCTriangle()
T = UFCInterval()
RT1 = RaviartThomas(S, 1)
P1_DG = DiscontinuousLagrange(T, 1)
elt = Hdiv(TensorProductElement(RT1, P1_DG))
assert elt.value_shape() == (3,)
tab = elt.tabulate(1, [(0.1, 0.2, 0.3)])
tabA = RT1.tabulate(1, [(0.1, 0.2)])
tabB = P1_DG.tabulate(1, [(0.3,)])
for da, db in [[(0, 0), (0,)], [(1, 0), (0,)], [(0, 1), (0,)], [(0, 0), (1,)]]:
dc = da + db
assert np.isclose(tab[dc][0][0][0], tabA[da][0][0][0]*tabB[db][0][0])
assert np.isclose(tab[dc][1][0][0], tabA[da][0][0][0]*tabB[db][1][0])
assert np.isclose(tab[dc][2][0][0], tabA[da][1][0][0]*tabB[db][0][0])
assert np.isclose(tab[dc][3][0][0], tabA[da][1][0][0]*tabB[db][1][0])
assert np.isclose(tab[dc][4][0][0], tabA[da][2][0][0]*tabB[db][0][0])
assert np.isclose(tab[dc][5][0][0], tabA[da][2][0][0]*tabB[db][1][0])
assert np.isclose(tab[dc][0][1][0], tabA[da][0][1][0]*tabB[db][0][0])
assert np.isclose(tab[dc][1][1][0], tabA[da][0][1][0]*tabB[db][1][0])
assert np.isclose(tab[dc][2][1][0], tabA[da][1][1][0]*tabB[db][0][0])
assert np.isclose(tab[dc][3][1][0], tabA[da][1][1][0]*tabB[db][1][0])
assert np.isclose(tab[dc][4][1][0], tabA[da][2][1][0]*tabB[db][0][0])
assert np.isclose(tab[dc][5][1][0], tabA[da][2][1][0]*tabB[db][1][0])
assert tab[dc][0][2][0] == 0.0
assert tab[dc][1][2][0] == 0.0
assert tab[dc][2][2][0] == 0.0
assert tab[dc][3][2][0] == 0.0
assert tab[dc][4][2][0] == 0.0
assert tab[dc][5][2][0] == 0.0
def test_TFE_2Dx1D_vector_triangle_hcurl():
S = UFCTriangle()
T = UFCInterval()
Ned1 = Nedelec(S, 1)
P1 = Lagrange(T, 1)
elt = Hcurl(TensorProductElement(Ned1, P1))
assert elt.value_shape() == (3,)
tab = elt.tabulate(1, [(0.1, 0.2, 0.3)])
tabA = Ned1.tabulate(1, [(0.1, 0.2)])
tabB = P1.tabulate(1, [(0.3,)])
for da, db in [[(0, 0), (0,)], [(1, 0), (0,)], [(0, 1), (0,)], [(0, 0), (1,)]]:
dc = da + db
assert np.isclose(tab[dc][0][0][0], tabA[da][0][0][0]*tabB[db][0][0])
assert np.isclose(tab[dc][1][0][0], tabA[da][0][0][0]*tabB[db][1][0])
assert np.isclose(tab[dc][2][0][0], tabA[da][1][0][0]*tabB[db][0][0])
assert np.isclose(tab[dc][3][0][0], tabA[da][1][0][0]*tabB[db][1][0])
assert np.isclose(tab[dc][4][0][0], tabA[da][2][0][0]*tabB[db][0][0])
assert np.isclose(tab[dc][5][0][0], tabA[da][2][0][0]*tabB[db][1][0])
assert np.isclose(tab[dc][0][1][0], tabA[da][0][1][0]*tabB[db][0][0])
assert np.isclose(tab[dc][1][1][0], tabA[da][0][1][0]*tabB[db][1][0])
assert np.isclose(tab[dc][2][1][0], tabA[da][1][1][0]*tabB[db][0][0])
assert np.isclose(tab[dc][3][1][0], tabA[da][1][1][0]*tabB[db][1][0])
assert np.isclose(tab[dc][4][1][0], tabA[da][2][1][0]*tabB[db][0][0])
assert np.isclose(tab[dc][5][1][0], tabA[da][2][1][0]*tabB[db][1][0])
assert tab[dc][0][2][0] == 0.0
assert tab[dc][1][2][0] == 0.0
assert tab[dc][2][2][0] == 0.0
assert tab[dc][3][2][0] == 0.0
assert tab[dc][4][2][0] == 0.0
assert tab[dc][5][2][0] == 0.0
def test_TFE_2Dx1D_vector_triangle_hdiv_rotate():
S = UFCTriangle()
T = UFCInterval()
Ned1 = Nedelec(S, 1)
P1_DG = DiscontinuousLagrange(T, 1)
elt = Hdiv(TensorProductElement(Ned1, P1_DG))
assert elt.value_shape() == (3,)
tab = elt.tabulate(1, [(0.1, 0.2, 0.3)])
tabA = Ned1.tabulate(1, [(0.1, 0.2)])
tabB = P1_DG.tabulate(1, [(0.3,)])
for da, db in [[(0, 0), (0,)], [(1, 0), (0,)], [(0, 1), (0,)], [(0, 0), (1,)]]:
dc = da + db
assert np.isclose(tab[dc][0][0][0], tabA[da][0][1][0]*tabB[db][0][0])
assert np.isclose(tab[dc][1][0][0], tabA[da][0][1][0]*tabB[db][1][0])
assert np.isclose(tab[dc][2][0][0], tabA[da][1][1][0]*tabB[db][0][0])
assert np.isclose(tab[dc][3][0][0], tabA[da][1][1][0]*tabB[db][1][0])
assert np.isclose(tab[dc][4][0][0], tabA[da][2][1][0]*tabB[db][0][0])
assert np.isclose(tab[dc][5][0][0], tabA[da][2][1][0]*tabB[db][1][0])
assert np.isclose(tab[dc][0][1][0], -tabA[da][0][0][0]*tabB[db][0][0])
assert np.isclose(tab[dc][1][1][0], -tabA[da][0][0][0]*tabB[db][1][0])
assert np.isclose(tab[dc][2][1][0], -tabA[da][1][0][0]*tabB[db][0][0])
assert np.isclose(tab[dc][3][1][0], -tabA[da][1][0][0]*tabB[db][1][0])
assert np.isclose(tab[dc][4][1][0], -tabA[da][2][0][0]*tabB[db][0][0])
assert np.isclose(tab[dc][5][1][0], -tabA[da][2][0][0]*tabB[db][1][0])
assert tab[dc][0][2][0] == 0.0
assert tab[dc][1][2][0] == 0.0
assert tab[dc][2][2][0] == 0.0
assert tab[dc][3][2][0] == 0.0
assert tab[dc][4][2][0] == 0.0
assert tab[dc][5][2][0] == 0.0
def test_TFE_2Dx1D_vector_triangle_hcurl_rotate():
S = UFCTriangle()
T = UFCInterval()
RT1 = RaviartThomas(S, 1)
P1 = Lagrange(T, 1)
elt = Hcurl(TensorProductElement(RT1, P1))
assert elt.value_shape() == (3,)
tab = elt.tabulate(1, [(0.1, 0.2, 0.3)])
tabA = RT1.tabulate(1, [(0.1, 0.2)])
tabB = P1.tabulate(1, [(0.3,)])
for da, db in [[(0, 0), (0,)], [(1, 0), (0,)], [(0, 1), (0,)], [(0, 0), (1,)]]:
dc = da + db
assert np.isclose(tab[dc][0][0][0], -tabA[da][0][1][0]*tabB[db][0][0])
assert np.isclose(tab[dc][1][0][0], -tabA[da][0][1][0]*tabB[db][1][0])
assert np.isclose(tab[dc][2][0][0], -tabA[da][1][1][0]*tabB[db][0][0])
assert np.isclose(tab[dc][3][0][0], -tabA[da][1][1][0]*tabB[db][1][0])
assert np.isclose(tab[dc][4][0][0], -tabA[da][2][1][0]*tabB[db][0][0])
assert np.isclose(tab[dc][5][0][0], -tabA[da][2][1][0]*tabB[db][1][0])
assert np.isclose(tab[dc][0][1][0], tabA[da][0][0][0]*tabB[db][0][0])
assert np.isclose(tab[dc][1][1][0], tabA[da][0][0][0]*tabB[db][1][0])
assert np.isclose(tab[dc][2][1][0], tabA[da][1][0][0]*tabB[db][0][0])
assert np.isclose(tab[dc][3][1][0], tabA[da][1][0][0]*tabB[db][1][0])
assert np.isclose(tab[dc][4][1][0], tabA[da][2][0][0]*tabB[db][0][0])
assert np.isclose(tab[dc][5][1][0], tabA[da][2][0][0]*tabB[db][1][0])
assert tab[dc][0][2][0] == 0.0
assert tab[dc][1][2][0] == 0.0
assert tab[dc][2][2][0] == 0.0
assert tab[dc][3][2][0] == 0.0
assert tab[dc][4][2][0] == 0.0
assert tab[dc][5][2][0] == 0.0
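# The sign patterns in the two "rotate" tests above encode the 90-degree
# rotations applied by Hdiv/Hcurl to 2D vector-valued factors: Hdiv sends
# (u, v) to (v, -u), turning the tangentially continuous Nedelec field into a
# normally continuous one, while Hcurl sends (u, v) to (-v, u) for the
# converse; the extrusion component stays identically zero.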
def test_TFE_2Dx1D_vector_quad_hdiv():
T = UFCInterval()
P1 = Lagrange(T, 1)
P0 = DiscontinuousLagrange(T, 0)
P1_DG = DiscontinuousLagrange(T, 1)
P1P0 = Hdiv(TensorProductElement(P1, P0))
P0P1 = Hdiv(TensorProductElement(P0, P1))
horiz_elt = EnrichedElement(P1P0, P0P1)
elt = Hdiv(TensorProductElement(horiz_elt, P1_DG))
assert elt.value_shape() == (3,)
tab = elt.tabulate(1, [(0.1, 0.2, 0.3)])
tA = P1.tabulate(1, [(0.1,)])
tB = P0.tabulate(1, [(0.2,)])
tC = P0.tabulate(1, [(0.1,)])
tD = P1.tabulate(1, [(0.2,)])
tE = P1_DG.tabulate(1, [(0.3,)])
for da, db, dc in [[(0,), (0,), (0,)], [(1,), (0,), (0,)], [(0,), (1,), (0,)], [(0,), (0,), (1,)]]:
dd = da + db + dc
assert np.isclose(tab[dd][0][0][0], -tA[da][0][0]*tB[db][0][0]*tE[dc][0][0])
assert np.isclose(tab[dd][1][0][0], -tA[da][0][0]*tB[db][0][0]*tE[dc][1][0])
assert np.isclose(tab[dd][2][0][0], -tA[da][1][0]*tB[db][0][0]*tE[dc][0][0])
assert np.isclose(tab[dd][3][0][0], -tA[da][1][0]*tB[db][0][0]*tE[dc][1][0])
assert tab[dd][4][0][0] == 0.0
assert tab[dd][5][0][0] == 0.0
assert tab[dd][6][0][0] == 0.0
assert tab[dd][7][0][0] == 0.0
assert tab[dd][0][1][0] == 0.0
assert tab[dd][1][1][0] == 0.0
assert tab[dd][2][1][0] == 0.0
assert tab[dd][3][1][0] == 0.0
assert np.isclose(tab[dd][4][1][0], tC[da][0][0]*tD[db][0][0]*tE[dc][0][0])
assert np.isclose(tab[dd][5][1][0], tC[da][0][0]*tD[db][0][0]*tE[dc][1][0])
assert np.isclose(tab[dd][6][1][0], tC[da][0][0]*tD[db][1][0]*tE[dc][0][0])
assert np.isclose(tab[dd][7][1][0], tC[da][0][0]*tD[db][1][0]*tE[dc][1][0])
assert tab[dd][0][2][0] == 0.0
assert tab[dd][1][2][0] == 0.0
assert tab[dd][2][2][0] == 0.0
assert tab[dd][3][2][0] == 0.0
assert tab[dd][4][2][0] == 0.0
assert tab[dd][5][2][0] == 0.0
assert tab[dd][6][2][0] == 0.0
assert tab[dd][7][2][0] == 0.0
def test_TFE_2Dx1D_vector_quad_hcurl():
T = UFCInterval()
P1 = Lagrange(T, 1)
P0 = DiscontinuousLagrange(T, 0)
P1P0 = Hcurl(TensorProductElement(P1, P0))
P0P1 = Hcurl(TensorProductElement(P0, P1))
horiz_elt = EnrichedElement(P1P0, P0P1)
elt = Hcurl(TensorProductElement(horiz_elt, P1))
assert elt.value_shape() == (3,)
tab = elt.tabulate(1, [(0.1, 0.2, 0.3)])
tA = P1.tabulate(1, [(0.1,)])
tB = P0.tabulate(1, [(0.2,)])
tC = P0.tabulate(1, [(0.1,)])
tD = P1.tabulate(1, [(0.2,)])
tE = P1.tabulate(1, [(0.3,)])
for da, db, dc in [[(0,), (0,), (0,)], [(1,), (0,), (0,)], [(0,), (1,), (0,)], [(0,), (0,), (1,)]]:
dd = da + db + dc
assert tab[dd][0][0][0] == 0.0
assert tab[dd][1][0][0] == 0.0
assert tab[dd][2][0][0] == 0.0
assert tab[dd][3][0][0] == 0.0
assert np.isclose(tab[dd][4][0][0], tC[da][0][0]*tD[db][0][0]*tE[dc][0][0])
assert np.isclose(tab[dd][5][0][0], tC[da][0][0]*tD[db][0][0]*tE[dc][1][0])
assert np.isclose(tab[dd][6][0][0], tC[da][0][0]*tD[db][1][0]*tE[dc][0][0])
assert np.isclose(tab[dd][7][0][0], tC[da][0][0]*tD[db][1][0]*tE[dc][1][0])
assert np.isclose(tab[dd][0][1][0], tA[da][0][0]*tB[db][0][0]*tE[dc][0][0])
assert np.isclose(tab[dd][1][1][0], tA[da][0][0]*tB[db][0][0]*tE[dc][1][0])
assert np.isclose(tab[dd][2][1][0], tA[da][1][0]*tB[db][0][0]*tE[dc][0][0])
assert np.isclose(tab[dd][3][1][0], tA[da][1][0]*tB[db][0][0]*tE[dc][1][0])
assert tab[dd][4][1][0] == 0.0
assert tab[dd][5][1][0] == 0.0
assert tab[dd][6][1][0] == 0.0
assert tab[dd][7][1][0] == 0.0
assert tab[dd][0][2][0] == 0.0
assert tab[dd][1][2][0] == 0.0
assert tab[dd][2][2][0] == 0.0
assert tab[dd][3][2][0] == 0.0
assert tab[dd][4][2][0] == 0.0
assert tab[dd][5][2][0] == 0.0
assert tab[dd][6][2][0] == 0.0
assert tab[dd][7][2][0] == 0.0
def test_flattened_against_tpe_quad():
T = UFCInterval()
P1 = Lagrange(T, 1)
tpe_quad = TensorProductElement(P1, P1)
flattened_quad = FlattenedDimensions(tpe_quad)
assert tpe_quad.value_shape() == ()
tpe_tab = tpe_quad.tabulate(1, [(0.1, 0.2)])
flattened_tab = flattened_quad.tabulate(1, [(0.1, 0.2)])
for da, db in [[(0,), (0,)], [(1,), (0,)], [(0,), (1,)]]:
dc = da + db
assert np.isclose(tpe_tab[dc][0][0], flattened_tab[dc][0][0])
assert np.isclose(tpe_tab[dc][1][0], flattened_tab[dc][1][0])
assert np.isclose(tpe_tab[dc][2][0], flattened_tab[dc][2][0])
assert np.isclose(tpe_tab[dc][3][0], flattened_tab[dc][3][0])
def test_flattened_against_tpe_hex():
T = UFCInterval()
P1 = Lagrange(T, 1)
tpe_quad = TensorProductElement(P1, P1)
tpe_hex = TensorProductElement(tpe_quad, P1)
flattened_quad = FlattenedDimensions(tpe_quad)
flattened_hex = FlattenedDimensions(TensorProductElement(flattened_quad, P1))
assert tpe_hex.value_shape() == ()
tpe_tab = tpe_hex.tabulate(1, [(0.1, 0.2, 0.3)])
flattened_tab = flattened_hex.tabulate(1, [(0.1, 0.2, 0.3)])
for da, db, dc in [[(0,), (0,), (0,)], [(1,), (0,), (0,)], [(0,), (1,), (0,)], [(0,), (0,), (1,)]]:
dd = da + db + dc
assert np.isclose(tpe_tab[dd][0][0], flattened_tab[dd][0][0])
assert np.isclose(tpe_tab[dd][1][0], flattened_tab[dd][1][0])
assert np.isclose(tpe_tab[dd][2][0], flattened_tab[dd][2][0])
assert np.isclose(tpe_tab[dd][3][0], flattened_tab[dd][3][0])
assert np.isclose(tpe_tab[dd][4][0], flattened_tab[dd][4][0])
assert np.isclose(tpe_tab[dd][5][0], flattened_tab[dd][5][0])
assert np.isclose(tpe_tab[dd][6][0], flattened_tab[dd][6][0])
assert np.isclose(tpe_tab[dd][7][0], flattened_tab[dd][7][0])
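# Editor's summary: FlattenedDimensions re-presents a TensorProductElement on
# the corresponding UFC quadrilateral/hexahedron cell; the two tests above
# confirm the flattening is purely administrative, with tabulations agreeing
# entry-wise.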
if __name__ == '__main__':
import os
pytest.main(os.path.abspath(__file__))