pax_global_header00006660000000000000000000000064147052066600014520gustar00rootroot0000000000000052 comment=6132a17dd6cd92a8d06f0960b7266642cc1fca78 fenics-dolfinx-0.9.0/000077500000000000000000000000001470520666000144365ustar00rootroot00000000000000fenics-dolfinx-0.9.0/.cmake-format000066400000000000000000000220631470520666000170100ustar00rootroot00000000000000_help_parse: Options affecting listfile parsing parse: _help_additional_commands: - Specify structure for custom cmake functions additional_commands: foo: flags: - BAR - BAZ kwargs: HEADERS: '*' SOURCES: '*' DEPENDS: '*' _help_override_spec: - Override configurations per-command where available override_spec: {} _help_vartags: - Specify variable tags. vartags: [] _help_proptags: - Specify property tags. proptags: [] _help_format: Options affecting formatting. format: _help_disable: - Disable formatting entirely, making cmake-format a no-op disable: false _help_line_width: - How wide to allow formatted cmake files line_width: 80 _help_tab_size: - How many spaces to tab for indent tab_size: 2 _help_use_tabchars: - If true, lines are indented using tab characters (utf-8 - 0x09) instead of space characters (utf-8 0x20). - In cases where the layout would require a fractional tab - character, the behavior of the fractional indentation is - governed by use_tabchars: false _help_fractional_tab_policy: - If is True, then the value of this variable - indicates how fractional indentions are handled during - whitespace replacement. If set to 'use-space', fractional - indentation is left as spaces (utf-8 0x20). If set to - '`round-up` fractional indentation is replaced with a single' - tab character (utf-8 0x09) effectively shifting the column - to the next tabstop fractional_tab_policy: use-space _help_max_subgroups_hwrap: - If an argument group contains more than this many sub-groups - (parg or kwarg groups) then force it to a vertical layout. 
max_subgroups_hwrap: 2 _help_max_pargs_hwrap: - If a positional argument group contains more than this many - arguments, then force it to a vertical layout. max_pargs_hwrap: 6 _help_max_rows_cmdline: - If a cmdline positional group consumes more than this many - lines without nesting, then invalidate the layout (and nest) max_rows_cmdline: 2 _help_separate_ctrl_name_with_space: - If true, separate flow control names from their parentheses - with a space separate_ctrl_name_with_space: false _help_separate_fn_name_with_space: - If true, separate function names from parentheses with a - space separate_fn_name_with_space: false _help_dangle_parens: - If a statement is wrapped to more than one line, than dangle - the closing parenthesis on its own line. dangle_parens: true _help_dangle_align: - If the trailing parenthesis must be 'dangled' on its on - 'line, then align it to this reference: `prefix`: the start' - 'of the statement, `prefix-indent`: the start of the' - 'statement, plus one indentation level, `child`: align to' - the column of the arguments dangle_align: prefix _help_min_prefix_chars: - If the statement spelling length (including space and - parenthesis) is smaller than this amount, then force reject - nested layouts. min_prefix_chars: 4 _help_max_prefix_chars: - If the statement spelling length (including space and - parenthesis) is larger than the tab width by more than this - amount, then force reject un-nested layouts. max_prefix_chars: 10 _help_max_lines_hwrap: - If a candidate layout is wrapped horizontally but it exceeds - this many lines, then reject the layout. max_lines_hwrap: 2 _help_line_ending: - What style line endings to use in the output. 
line_ending: unix _help_command_case: - Format command names consistently as 'lower' or 'upper' case command_case: canonical _help_keyword_case: - Format keywords consistently as 'lower' or 'upper' case keyword_case: unchanged _help_always_wrap: - A list of command names which should always be wrapped always_wrap: [] _help_enable_sort: - If true, the argument lists which are known to be sortable - will be sorted lexicographicall enable_sort: true _help_autosort: - If true, the parsers may infer whether or not an argument - list is sortable (without annotation). autosort: false _help_require_valid_layout: - By default, if cmake-format cannot successfully fit - everything into the desired linewidth it will apply the - last, most agressive attempt that it made. If this flag is - True, however, cmake-format will print error, exit with non- - zero status code, and write-out nothing require_valid_layout: false _help_layout_passes: - A dictionary mapping layout nodes to a list of wrap - decisions. See the documentation for more information. layout_passes: {} _help_markup: Options affecting comment reflow and formatting. markup: _help_bullet_char: - What character to use for bulleted lists bullet_char: '*' _help_enum_char: - What character to use as punctuation after numerals in an - enumerated list enum_char: . _help_first_comment_is_literal: - If comment markup is enabled, don't reflow the first comment - block in each listfile. Use this to preserve formatting of - your copyright/license statements. first_comment_is_literal: true _help_literal_comment_pattern: - If comment markup is enabled, don't reflow any comment block - which matches this (regex) pattern. Default is `None` - (disabled). 
literal_comment_pattern: null _help_fence_pattern: - Regular expression to match preformat fences in comments - default= ``r'^\s*([`~]{3}[`~]*)(.*)$'`` fence_pattern: ^\s*([`~]{3}[`~]*)(.*)$ _help_ruler_pattern: - Regular expression to match rulers in comments default= - '``r''^\s*[^\w\s]{3}.*[^\w\s]{3}$''``' ruler_pattern: ^\s*[^\w\s]{3}.*[^\w\s]{3}$ _help_explicit_trailing_pattern: - If a comment line matches starts with this pattern then it - is explicitly a trailing comment for the preceeding - argument. Default is '#<' explicit_trailing_pattern: '#<' _help_hashruler_min_length: - If a comment line starts with at least this many consecutive - hash characters, then don't lstrip() them off. This allows - for lazy hash rulers where the first hash char is not - separated by space hashruler_min_length: 10 _help_canonicalize_hashrulers: - If true, then insert a space between the first hash char and - remaining hash chars in a hash ruler, and normalize its - length to fill the column canonicalize_hashrulers: true _help_enable_markup: - enable comment markup parsing and reflow enable_markup: true _help_lint: Options affecting the linter lint: _help_disabled_codes: - a list of lint codes to disable disabled_codes: [] _help_function_pattern: - regular expression pattern describing valid function names function_pattern: '[0-9a-z_]+' _help_macro_pattern: - regular expression pattern describing valid macro names macro_pattern: '[0-9A-Z_]+' _help_global_var_pattern: - regular expression pattern describing valid names for - variables with global (cache) scope global_var_pattern: '[A-Z][0-9A-Z_]+' _help_internal_var_pattern: - regular expression pattern describing valid names for - variables with global scope (but internal semantic) internal_var_pattern: _[A-Z][0-9A-Z_]+ _help_local_var_pattern: - regular expression pattern describing valid names for - variables with local scope local_var_pattern: '[a-z][a-z0-9_]+' _help_private_var_pattern: - regular expression pattern 
describing valid names for - privatedirectory variables private_var_pattern: _[0-9a-z_]+ _help_public_var_pattern: - regular expression pattern describing valid names for public - directory variables public_var_pattern: '[A-Z][0-9A-Z_]+' _help_argument_var_pattern: - regular expression pattern describing valid names for - function/macro arguments and loop variables. argument_var_pattern: '[a-z][a-z0-9_]+' _help_keyword_pattern: - regular expression pattern describing valid names for - keywords used in functions or macros keyword_pattern: '[A-Z][0-9A-Z_]+' _help_max_conditionals_custom_parser: - In the heuristic for C0201, how many conditionals to match - within a loop in before considering the loop a parser. max_conditionals_custom_parser: 2 _help_min_statement_spacing: - Require at least this many newlines between statements min_statement_spacing: 1 _help_max_statement_spacing: - Require no more than this many newlines between statements max_statement_spacing: 2 max_returns: 6 max_branches: 12 max_arguments: 5 max_localvars: 15 max_statements: 50 _help_encode: Options affecting file encoding encode: _help_emit_byteorder_mark: - If true, emit the unicode byte-order mark (BOM) at the start - of the file emit_byteorder_mark: false _help_input_encoding: - Specify the encoding of the input file. Defaults to utf-8 input_encoding: utf-8 _help_output_encoding: - Specify the encoding of the output file. Defaults to utf-8. - Note that cmake only claims to support utf-8 so be careful - when using anything else output_encoding: utf-8 _help_misc: Miscellaneous configurations options. misc: _help_per_command: - A dictionary containing any per-command configuration - overrides. Currently only `command_case` is supported. 
per_command: {} fenics-dolfinx-0.9.0/AUTHORS000066400000000000000000000117061470520666000155130ustar00rootroot00000000000000Authors/contributors in alphabetical order: Ido Akkerman (-) Martin Sandve Alnæs (C) Francesco Ballarin (C) Igor Baratta (-) Fredrik Bengzon (-) Aslak Bergersen (C) Jan Blechta (C) Rolv Erlend Bredesen (C) Jed Brown (C) Solveig Bruvoll (C) Jørgen Dokken (-) Niklas Ericsson (-) Patrick Farrell (C) Georgios Foufas (C) Tom Gustafsson (C) Joachim B Haga (C) Johan Hake (C) Jack S. Hale (C) Rasmus Hemph (-) David Heintz (-) Johan Hoffman (C) Par Ingelstrom (-) Anders E. Johansen (C) Johan Jansson (C) Niclas Jansson (C) Alexander Jarosch (C) Kristen Kaasbjerg (C) Benjamin Kehlet (C) Arve Knudsen (C) Karin Kraft (-) Aleksandra Krusper (-) Evan Lezar (C) Tianyi Li (C) Matthias Liertzer (C) Dag Lindbo (C) Glenn Terje Lines (C) Anders Logg (C) Nuno Lopes (C) Kent-Andre Mardal (C) Andreas Mark (-) Andre Massing (C) Lawrence Mitchell (C) Marco Morandini (C) Mikael Mortensen (C) Corrado Maurini (C) Pablo De Napoli (-) Harish Narayanan (C) Andreas Nilsson (-) Minh Do-Quang (-) Chris Richardson (C) Johannes Ring (C) Marie E. Rognes (C) John Rudge (-) Bartosz Sawicki (C) Nico Schlömer (C) Matthew Scroggs (-) Kristoffer Selim (C) Angelo Simone (C) Ola Skavhaug (C) Thomas Svedberg (-) Erik Svensson (C) Harald Svensson (-) Andy Terrel (C) Jim Tilander (C) Fredrik Valdmanis (C) Magnus Vikstrøm (C) Walter Villanueva (-) Shawn Walker (C) Garth N. Wells (C) Ilmar Wilbers (C) Cian Wilson (C) Ivan Yashchuk (C) Michele Zaffalon (C) Åsmund Ødegård (C) Kristian Ølgaard (C) (C) = copyright form signed (!) = missing copyright form! (-) = minor change, copyright form not signed Missing credits? Tell us and we will fix it. Send an email to fenics-steering-council@googlegroups.com fenics-dolfinx-0.9.0/CITATION.cff000066400000000000000000000027751470520666000163430ustar00rootroot00000000000000cff-version: 1.2.0 message: "If you use this software, please cite it as below." 
title: "DOLFINx" version: 0.8.0 date-released: 2024-04-22 url: "https://github.com/FEniCS/dolfinx" doi: "10.5281/zenodo.10047760" authors: - family-names: "FEniCS Project Developers" given-names: "The" preferred-citation: type: article authors: - family-names: "Baratta" given-names: "Igor A." orcid: "https://orcid.org/0000-0003-4298-2973" - family-names: "Dean" given-names: "Joseph P." orcid: "https://orcid.org/0000-0001-7499-3373" - family-names: "Dokken" given-names: "Jørgen S." orcid: "https://orcid.org/0000-0001-6489-8858" - family-names: "Habera" given-names: "Michal" orcid: "https://orcid.org/0000-0003-0604-8884" - family-names: "Hale" given-names: "Jack S." orcid: "https://orcid.org/0000-0001-7216-861X" - family-names: "Richardson" given-names: "Chris N." orcid: "https://orcid.org/0000-0003-3137-1392" - family-names: "Rognes" given-names: "Marie E." orcid: "https://orcid.org/0000-0002-6872-3710" - family-names: "Scroggs" given-names: "Matthew W." orcid: "https://orcid.org/0000-0002-4658-2443" - family-names: "Sime" given-names: "Nathan" orcid: "https://orcid.org/0000-0002-2319-048X" - family-names: "Wells" given-names: "Garth N." orcid: "https://orcid.org/0000-0001-5291-7951" doi: "10.5281/zenodo.10447666" journal: "preprint" title: "DOLFINx: the next generation FEniCS problem solving environment" year: 2023 fenics-dolfinx-0.9.0/CODE_OF_CONDUCT.md000066400000000000000000000072501470520666000172410ustar00rootroot00000000000000Code of Conduct =============== Our Pledge ---------- In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. 
Our Standards ------------- Examples of behavior that contributes to creating a positive environment include: * Using welcoming and inclusive language * Being respectful of differing viewpoints and experiences * Gracefully accepting constructive criticism * Focusing on what is best for the community * Showing empathy towards other community members Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or advances * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others’ private information, such as a physical or electronic address, without explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting Our Responsibilities -------------------- Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. Scope ----- This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 
Enforcement ----------- Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at fenics-steering-council@googlegroups.com. Alternatively, you may report individually to one of the members of the Steering Council. Complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project’s leadership. If you feel that your report has not been followed up satisfactorily, then you may contact our parent organisation NumFOCUS at info@numfocus.org for further redress. Attribution ----------- This Code of Conduct is adapted from the Contributor Covenant, version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html. Adaptations ----------- * Allow reporting to individual Steering Council members * Added the option to contact NumFOCUS for further redress. For answers to common questions about this code of conduct, see https://www.contributor-covenant.org/faqfenics-dolfinx-0.9.0/CONTRIBUTING.md000066400000000000000000000020021470520666000166610ustar00rootroot00000000000000## How to contribute ### Reporting bugs If you find a bug in DOLFINx, please report it on the [GitHub issue tracker](https://github.com/fenics/dolfinx/issues/new?labels=bug). ### Suggesting enhancements If you want to suggest a new feature or an improvement of a current feature, you can submit this on the [issue tracker](https://github.com/fenics/dolfinx/issues). ### Submitting a pull request To contribute code DOLFINx, create a pull request. 
If you want to contribute, but are unsure where to start, have a look at the [issues labelled "good first issue"](https://github.com/FEniCS/dolfinx/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22). For substantial changes/contributions, please start with an issue or start a discussion on Slack. On opening a pull request, unit tests will run on GitHub CI. You can click on these in the pull request to see where (if anywhere) the tests are failing. ### Code of conduct We expect all our contributors to follow the [code of conduct](CODE_OF_CONDUCT.md). fenics-dolfinx-0.9.0/COPYING000066400000000000000000001051361470520666000154770ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. 
To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. 
States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. 
An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. 
However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. 
Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. 
b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. 
b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. 
A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. 
But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. 
Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. 
The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. 
"Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. 
If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. 
If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. 
If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: Copyright (C) This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. 
The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see . The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . Note: Individual files contain the following tag instead of the full license text. SPDX-License-Identifier: LGPL-3.0-or-later This enables machine processing of license information based on the SPDX License Identifiers that are here available: http://spdx.org/licenses/fenics-dolfinx-0.9.0/COPYING.LESSER000066400000000000000000000167261470520666000165010ustar00rootroot00000000000000 GNU LESSER GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. This version of the GNU Lesser General Public License incorporates the terms and conditions of version 3 of the GNU General Public License, supplemented by the additional permissions listed below. 0. Additional Definitions. As used herein, "this License" refers to version 3 of the GNU Lesser General Public License, and the "GNU GPL" refers to version 3 of the GNU General Public License. "The Library" refers to a covered work governed by this License, other than an Application or a Combined Work as defined below. 
An "Application" is any work that makes use of an interface provided by the Library, but which is not otherwise based on the Library. Defining a subclass of a class defined by the Library is deemed a mode of using an interface provided by the Library. A "Combined Work" is a work produced by combining or linking an Application with the Library. The particular version of the Library with which the Combined Work was made is also called the "Linked Version". The "Minimal Corresponding Source" for a Combined Work means the Corresponding Source for the Combined Work, excluding any source code for portions of the Combined Work that, considered in isolation, are based on the Application, and not on the Linked Version. The "Corresponding Application Code" for a Combined Work means the object code and/or source code for the Application, including any data and utility programs needed for reproducing the Combined Work from the Application, but excluding the System Libraries of the Combined Work. 1. Exception to Section 3 of the GNU GPL. You may convey a covered work under sections 3 and 4 of this License without being bound by section 3 of the GNU GPL. 2. Conveying Modified Versions. If you modify a copy of the Library, and, in your modifications, a facility refers to a function or data to be supplied by an Application that uses the facility (other than as an argument passed when the facility is invoked), then you may convey a copy of the modified version: a) under this License, provided that you make a good faith effort to ensure that, in the event an Application does not supply the function or data, the facility still operates, and performs whatever part of its purpose remains meaningful, or b) under the GNU GPL, with none of the additional permissions of this License applicable to that copy. 3. Object Code Incorporating Material from Library Header Files. The object code form of an Application may incorporate material from a header file that is part of the Library. 
You may convey such object code under terms of your choice, provided that, if the incorporated material is not limited to numerical parameters, data structure layouts and accessors, or small macros, inline functions and templates (ten or fewer lines in length), you do both of the following: a) Give prominent notice with each copy of the object code that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the object code with a copy of the GNU GPL and this license document. 4. Combined Works. You may convey a Combined Work under terms of your choice that, taken together, effectively do not restrict modification of the portions of the Library contained in the Combined Work and reverse engineering for debugging such modifications, if you also do each of the following: a) Give prominent notice with each copy of the Combined Work that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the Combined Work with a copy of the GNU GPL and this license document. c) For a Combined Work that displays copyright notices during execution, include the copyright notice for the Library among these notices, as well as a reference directing the user to the copies of the GNU GPL and this license document. d) Do one of the following: 0) Convey the Minimal Corresponding Source under the terms of this License, and the Corresponding Application Code in a form suitable for, and under terms that permit, the user to recombine or relink the Application with a modified version of the Linked Version to produce a modified Combined Work, in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source. 1) Use a suitable shared library mechanism for linking with the Library. 
A suitable mechanism is one that (a) uses at run time a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked Version. e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the GNU GPL, and only to the extent that such information is necessary to install and execute a modified version of the Combined Work produced by recombining or relinking the Application with a modified version of the Linked Version. (If you use option 4d0, the Installation Information must accompany the Minimal Corresponding Source and Corresponding Application Code. If you use option 4d1, you must provide the Installation Information in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.) 5. Combined Libraries. You may place library facilities that are a work based on the Library side by side in a single library together with other library facilities that are not Applications and are not covered by this License, and convey such a combined library under terms of your choice, if you do both of the following: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities, conveyed under the terms of this License. b) Give prominent notice with the combined library that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 6. Revised Versions of the GNU Lesser General Public License. The Free Software Foundation may publish revised and/or new versions of the GNU Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. 
If the Library as you received it specifies that a certain numbered version of the GNU Lesser General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that published version or of any later version published by the Free Software Foundation. If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation. If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library. fenics-dolfinx-0.9.0/ChangeLog.rst000066400000000000000000002326631470520666000170330ustar00rootroot00000000000000Change log ========== The new change log for DOLFINx is maintained using `GitHub Releases `_. Below are the historical change logs related to the previous generation `DOLFIN solver `_. 2018.2.0.dev0 ------------- - No changes 2018.1.0.dev0 (no release) -------------------------- - Forked DOLFINx 2017.2.0 (2017-12-05) --------------------- - Remove ``UnitQuadMesh`` and ``UnitHexMesh``. Now use ``UnitSquareMesh`` and ``UnitCubeMesh`` with cell type qualifiers. - Remove ``MeshEditor::open`` without cell type. Now you must explicitly specify CellType when opening a ``Mesh`` with ``MeshEditor``. - Rename ``Mesh::size_global`` to ``Mesh::num_entities_global``. - Remove ``Mesh::size``. Use ``Mesh::num_entities`` instead. - Improved mesh topology computation performance. - Remove excessive calls to MPI init. It may now be necessary in some cases to explicitly intialise MPI. - Improvements to sparsity pattern computation. - Addition of some interfaces using ``Eigen::Map/ref`` in addition to ``dolfin::Array(View)``. 
``dolfin::Array(View)``interfaces will be removed in favour of Eigen interfaces in the next release. - Update pkg-config (dolfin.pc) file. - CMake modernisations, with more use of exported targets. - Add experimental pybind11 generated Python interface. Will replace the SWIG generated interface in the 2018.1 release. - Remove redundant SLEPc interfaces. Fixes issue `#908 `_. - Bump required Boost version to 1.55. - Remove PETScUserPreconditioner (was unused and untested). - Remove VTK plotting backend. Plotting is no longer available from the C++ interface. Basic plotting is available using ``matplotlib`` and ``x3dom`` backends via the ``plot()`` free function in the Python interface. Users are advised to move to e.g. Paraview for more demanding plotting needs. - Updates for ``ufc::finite_element::evaluate_vertex_values`` interface change. - Add new methods ``XDMFFile::write_checkpoint``, ``XDMFFile::read_checkpoint`` to write and read (checkpoint) functions. - Implement marking vertex and edge mesh functions by ``SubDomain::mark()`` using ``on_boundary`` flag. - Fix quadratic scaling in PETSc matrix allocation with global dofs; assembly with ``Real`` space now exhibits linear scaling in number of dofs. - Add assembly for quadrilateral and hexahedral meshes with CG and DG elements. - Updates for some demos and tests to show usage of quadrilateral and hexahedral meshes. - Deprecate ``CellSize`` (equivalent to ``2*Circumradius``) in favour of new ``CellDiameter``; add ``MinCellEdgeLength`` and ``MaxCellEdgeLength`` - Deprecate subclassing of ``Expression`` in Python; new Python class ``UserExpression`` introduced for user overloads - Deprecate ``VertexFunction``, ``EdgeFunction``, ``FaceFunction``, ``FacetFunction``, ``CellFunction``; use ``MeshFunction`` instead 2017.1.0 (2017-05-09) --------------------- - Refactor PETScLUSolver to use functionality from PETScKrylovSolver. Simplify interface for solving transposed systems. Fixes #815. 
- Switch default Python version to Python 3. Use `-DDOLFIN_USE_PYTHON3=off` to build with Python 2. - Remove redundant ``solve_transpose`` functions (use solve with bool argument instead) - Remove OpenMpAsssmebler - Remove MPI communicator as argument in GenericVector::init functions (communicator should be passed via constructor) - Remove ``Function::operator[+-*/]`` to prevent memory corruption problems (does not affect Python interface) - Fix XDMF3 output of time series. The default output method is now to assume that all functions have different meshes, and that the meshes change from time step to time step. Two parameters control the output, one limits each function to only one mesh for the whole time series, turn off the default on parameter ``rewrite_function_mesh`` to enable this. You can also make all functions share the same mesh and time series, which currently is better supported in Paraview than the alternative, turn on ``functions_share_mesh`` for this. These two parameters can also be combined in case all functions share the same mesh at all time steps. This creates minimal size files. - Add ``PETScSNESSolver`` and ``PETScTAOSolver`` constructor accepting both communicator and type - Expression("f[0]*f[1]", f=obj) notation now supported for non-scalar GenericFunction obj - Expression("f", f=obj) notation now supports obj of MeshFunction types (only cell based) - Fix MPI deadlock in case of instant compilation failure - Allow using ``Timer`` as context manager and add ``timed`` decorator to measure timings of functions and methods - Add ``NonlinearProblem::J_pc`` and support preconditioning matrix in ``NewtonSolver``, ``PETScSNESSolver`` and ``PETScTAOSolver`` 2016.2.0 [2016-11-30] --------------------- - Updates to XDMFFile interface, now fully supporting MeshFunction and MeshValueCollection with multiple named datasets in one file (useful for volume/boundary markers). Time series now only when a time is explicitly specified for each step. 
Full support for ASCII/XML XDMF. - Improved X3DOM support - Improved detection of UFC - Add CMake option `-DDOLFIN_USE_PYTHON3` to create a Python 3 build - Require CMake version 3.5 or higher - Add pylit to generate demo doc from rst - More careful checks of Dirichlet BC function spaces - Change definition of FunctionSpace::component() - Adaptive solving now works for tensor-valued unknowns - Improve logging of PETSc errors; details logged at level TRACE 2016.1.0 [2016-06-23] --------------------- - Remove support for 'uint'-valued MeshFunction (replaced by 'size_t') - Major performance improvements and simplifications of the XDMF IO. - Remove Zoltan graph partitioning interface - Add new algorithm for computing mesh entiites. Typical speed-up of two with gcc and four with clang. Reduced memory usage for meshes with irregularly numbered cells. - Remove STLMatrix, STLVector, MUMPSLUSolver and PastixLUSolver classes - Remove PETScPreconditioner::set_near_nullspace and add PETScMatrix::set_near_nullspace - Build system updates for VTK 7.0 - Remove XDMF from File interface. XDMF is XML based, and has many possibilities for file access, which are not accessible through the limited File interface and "<<" ">>" operators. Instead of File, use XDMFFile, and use XDMFFile.read() and XDMFFile.write() for I/O. Demos and tests have been updated to show usage. XDMF now also supports ASCII I/O in serial, useful for compatibility with users who do not have the HDF5 library available. - Require polynomial degree or finite element for Expressions in the Python interface (fixes Issue #355, https://bitbucket.org/fenics-project/dolfin/issues/355) - Switch to Google Test framwork for C++ unit tests - Fix bug when reading domain data from mesh file for a ghosted mesh - Add interface for manipulating mesh geometry using (higher-order) FE functions: free functions set_coordinates, get_coordinates, create_mesh - Fix bug when reading domain data from mesh file for a ghosted mesh. 
- Remove reference versions of constructors for many classes that store a pointer/reference to the object passed to the constructor. This is an intrusive interface change for C++ users, but necessary to improve code maintainabilty and to improve memory safety. The Python interface is (virtually) unaffected. - Remove class SubSpace. Using FunctionSpace::sub(...) instead - Remove reference versions constructors of NonlinearVariationalSolver - Remove setting of bounds from NonlinearVariationalSolver (was already available through NonlinearVariationalProblem) - Update Trilinos support to include Amesos2, and better support from Python - Rewrite interface of TensorLayout and SparsityPattern; local-to-global maps now handled using new IndexMap class; GenericSparsityPattern class removed - Remove QT (was an optional dependency) - PETScTAOSolver::solve() now returns a pair of number of iterations (std::size_t) and whether iteration converged (bool) - Better quality refinement in 2D in Plaza algorithm, by choosing refinement pattern based on max/min edge ratio - Removed refine_cell() method in CellTypes - Enable marker refinement to work in parallel for 1D meshes too - Add std::abort to Python exception hook to avoid parallel deadlocks - Extend dof_to_vertex_map with unowned dofs, thus making dof_to_vertex_map an inverse of vertex_to_dof_map - Clean-up in PyDOLFIN function space design, issue #576 - Deprecate MixedFunctionSpace and EnrichedFunctionSpace in favour of initialization by suitable UFL element - Add experimental matplotlib-based plotting backend, see mplot demo - Remove method argument of DirichletBC::get_boundary_values() - Change return types of free functions adapt() to shared_ptr 1.6.0 [2015-07-28] ------------------ - Remove redundant pressure boundary condition in Stokes demos - Require Point in RectangleMesh and BoxMesh constructors - Remove BinaryFile (TimeSeries now requires HDF5) - Add (highly experimental) support for Tpetra matrices and vectors from 
Trilinos, interfacing to Belos, Amesos2, IfPack2 and Muelu. - Enable (highly experimental) support for Quadrilateral and Hexahedral meshes, including some I/O, but no assembly yet. - Enable UMFPACK and CHOLMOD solvers with Eigen backend - Add an MPI_Comm to logger, currently defaulted to MPI_COMM_WORLD allowing better control over output in parallel - Experimental output of quadratic geometry in XDMF files, allows more exact visualisation of P2 Functions - Remove GenericMatrix::compressed (see Issue #61) - Deprecate PETScKrylovSolver::set_nullspace() and add PETScMatrix::set_nullspace() - Remove uBLAS backend - Remove UmfpackLUSolver and CholmodSolver - Add EigenMatrix/Vector::data() - Remove GenericMatrix::data() and GenericVector::data() (to use backends that support data(), cast first to backend type, e.g. A = A.as_backend_type()) - Remove cmake.local, replaced by fenics-install-component.sh - Make interior facet integrals define - and + cells ordered by cell_domains value. - Remove deprecated arguments *_domains from assemble() and Form(). - Change measure definition notation from dx[mesh_function] to dx(subdomain_data=mesh_function). - Set locale to "C" before reading from file - Change GenericDofMap::cell_dofs return type from const std::vector<..>& to ArrayView - Add ArrayView class for views into arrays - Change fall back linear algebra backend to Eigen - Add Eigen linear algebra backend - Remove deprecated GenericDofMap::geometric_dim function (fixes Issue #443) - Add quadrature rules for multimesh/cut-cell integration up to order 6 - Implement MPI reductions and XML output of Table class - list_timings() is now collective and returns MPI average across processes - Add dump_timings_to_xml() - Add enum TimingType { wall, user, system } for selecting wall-clock, user and system time in timing routines - Bump required SWIG version to 3.0.3 - Increase default maximum iterations in NewtonSolver to 50. 
- Deprecate Python free function homogenize(bc) in favour of member function DirichletBC::homogenize() 1.5.0 [2015-01-12] ------------------ - DG demos working in parallel - Simplify re-use of LU factorisations - CMake 3 compatibility - Make underlying SLEPc object accessible - Full support for linear algebra backends with 64-bit integers - Add smoothed aggregation AMG elasticity demo - Add support for slepc4py - Some self-assignment fixes in mesh data structures - Deprecated GenericDofMap::geometric_dimension() - Experimental support for ghosted meshes (overlapping region in parallel) - Significant memory reduction in dofmap storage - Re-write dofmap construction with significant performance and scaling improvements in parallel - Switch to local (process-wise) indexing for dof indices - Support local (process-wise) indexing in linear algebra backends - Added support for PETSc 3.5, require version >= 3.3 - Exposed DofMap::tabulate_local_to_global_dofs, MeshEntity::sharing_processes in Python - Added GenericDofmap::local_dimension("all"|"owned"|"unowned") - Added access to SLEPc or slepc4py EPS object of SLEPcEigenSolver (requires slepc4py version >= 3.5.1) - LinearOperator can now be accessed using petsc4py - Add interface (PETScTAOSolver) for the PETSc nonlinear (bound-constrained) optimisation solver (TAO) - Add GenericMatrix::nnz() function to return number of nonzero entries in matrix (fixes #110) - Add smoothed aggregation algebraic multigrid demo for elasticity - Add argument 'function' to project, to store the result into a preallocated function - Remove CGAL dependency and mesh generation, now provided by mshr - Python 2.7 required - Add experimental Python 3 support. 
Need swig version 3.0.3 or later - Move to py.test, speed up unit tests and make tests more robust in parallel - Repeated initialization of PETScMatrix is now an error - MPI interface change: num_processes -> size, process_number -> rank - Add optional argument project(..., function=f), to avoid superfluous allocation - Remove excessive printing of points during extrapolation - Clean up DG demos by dropping restrictions of Constants: c('+') -> c - Fix systemassembler warning when a and L both provide the same subdomain data. - Require mesh instead of cell argument to FacetArea, FacetNormal, CellSize, CellVolume, SpatialCoordinate, Circumradius, MinFacetEdgeLength, MaxFacetEdgeLength - Remove argument reset_sparsity to assemble() - Simplify assemble() and Form() signature: remove arguments mesh, coefficients, function_spaces, common_cell. These are now all found by inspecting the UFL form - Speed up assembly of forms with multiple integrals depending on different functions, e.g. f*dx(1) + g*dx(2). 
- Handle accessing of GenericVectors using numpy arrays in python layer instead of in hard-to-maintain C++ layer - Add support for mpi groups in jit-compilation - Make access to HDFAttributes more dict like - Add 1st and 2nd order Rush Larsen schemes for the PointIntegralSolver - Add vertex assembler for PointIntegrals - Add support for assembly of custom_integral - Add support for multimesh assembly, function spaces, dofmaps and functions - Fix to Cell-Point collision detection to prevent Points inside the mesh from falling between Cells due to rounding errors - Enable reordering of cells and vertices in parallel via SCOTCH and the Gibbs-Poole-Stockmeyer algorithm - Efficiency improvements in dof assignment in parallel, working on HPC up to 24000 cores - Introduction of PlazaRefinement methods based on refinement of the Mesh skeleton, giving better quality refinement in 3D in parallel - Basic support for 'ghost cells' allowing integration over interior facets in parallel 1.4.0 [2014-06-02] ------------------ - Feature: Add set_diagonal (with GenericVector) to GenericMatrix - Fix many bugs associated with cell orientations on manifolds - Force all global dofs to be ordered last and to be on the last process in parallel - Speed up dof reordering of mixed space including global dofs by removing the latter from graph reordering - Force all dofs on a shared facet to be owned by the same process - Add FEniCS ('fenics') Python module, identical with DOLFIN Python module - Add function Form::set_some_coefficients() - Remove Boost.MPI dependency - Change GenericMatrix::compress to return a new matrix (7be3a29) - Add function GenericTensor::empty() - Deprecate resizing of linear algebra via the GenericFoo interfaces (fixes #213) - Deprecate MPI::process_number() in favour of MPI::rank(MPI_Comm) - Use PETSc built-in reference counting to manage lifetime of wrapped PETSc objects - Remove random access function from MeshEntityIterator (fixes #178) - Add support for VTK 6 
(fixes #149) - Use MPI communicator in interfaces. Permits the creation of distributed and local objects, e.g. Meshes. - Reduce memory usage and increase speed of mesh topology computation 1.3.0 [2014-01-07] ------------------ - Feature: Enable assignment of sparse MeshValueCollections to MeshFunctions - Feature: Add free function assign that is used for sub function assignment - Feature: Add class FunctionAssigner that cache dofs for sub function assignment - Fix runtime dependency on checking swig version - Deprecate DofMap member methods vertex_to_dof_map and dof_to_vertex_map - Add free functions: vertex_to_dof_map and dof_to_vertex_map, and correct the ordering of the map. - Introduce CompiledSubDomain a more robust version of compiled_subdomains, which is now deprecated - CMake now takes care of calling the correct generate-foo script if so needed. - Feature: Add new built-in computational geometry library (BoundingBoxTree) - Feature: Add support for setting name and label to an Expression when constructed - Feature: Add support for passing a scalar GenericFunction as default value to a CompiledExpression - Feature: Add support for distance queries for 3-D meshes - Feature: Add PointIntegralSolver, which uses the MultiStageSchemes to solve local ODEs at Vertices - Feature: Add RKSolver and MultiStageScheme for general time integral solvers - Feature: Add support for assigning a Function with linear combinations of Functions, which lives in the same FunctionSpace - Added Python wrapper for SystemAssembler - Added a demo using compiled_extension_module with separate source files - Fixes for NumPy 1.7 - Remove DOLFIN wrapper code (moved to FFC) - Add set_options_prefix to PETScKrylovSolver - Remove base class BoundarCondition - Set block size for PETScMatrix when available from TensorLayout - Add support to get block compressed format from STLMatrix - Add detection of block structures in the dofmap for vector equations - Expose PETSc GAMG parameters - Modify 
SystemAssembler to support separate assembly of A and b 1.2.0 [2013-03-24] ------------------ - Fixes bug where child/parent hierarchy in Python was destroyed - Add utility script dolfin-get-demos - MeshFunctions in python now support iterable protocol - Add timed VTK output for Mesh and MeshFunction in addition to Functions - Expose ufc::dofmap::tabulate_entity_dofs to GenericDofMap interface - Expose ufc::dofmap::num_entity_dofs to GenericDofMap interface - Allow setting of row dof coordinates in preconditioners (only works with PETSc backend for now) - Expose more PETSc/ML parameters - Improve speed of tabulating coordinates in some DofMap functions - Feature: Add support for passing a Constant as default value to a CompiledExpression - Fix bug in dimension check for 1-D ALE - Remove some redundant graph code - Improvements in speed of parallel dual graph builder - Fix bug in XDMF output for cell-based Functions - Fixes for latest version of clang compiler - LocalSolver class added to efficiently solve cell-wise problems - New implementation of periodic boundary conditions. Now incorporated into the dofmap - Optional arguments to assemblers removed - SymmetricAssembler removed - Domains for assemblers can now only be attached to forms - SubMesh can now be constructed without a CellFunction argument, if the MeshDomain contains marked celldomains. 
- MeshDomains are propagated to a SubMesh during construction - Simplify generation of a MeshFunction from MeshDomains: No need to call mesh_function with mesh - Rename dolfin-config.cmake to DOLFINConfig.cmake - Use CMake to configure JIT compilation of extension modules - Feature: Add vertex_to_dof_map to DofMap, which map vertex indices to dolfin dofs - Feature: Add support for solving on m dimensional meshes embedded in n >= m dimensions 1.1.0 [2013-01-08] ------------------ - Add support for solving singular problems with Krylov solvers (PETSc only) - Add new typedef dolfin::la_index for consistent indexing with linear algebra backends. - Change default unsigned integer type to std::size_t - Add support to attaching operator null space to preconditioner (required for smoothed aggregation AMG) - Add basic interface to the PETSc AMG preconditioner - Make SCOTCH default graph partitioner (GNU-compatible free license, unlike ParMETIS) - Add scalable construction of mesh dual graph for mesh partitioning - Improve performance of mesh building in parallel - Add mesh output to SVG - Add support for Facet and cell markers to mesh converted from Diffpack - Add support for Facet and cell markers/attributes to mesh converted from Triangle - Change interface for auto-adaptive solvers: these now take the goal functional as a constructor argument - Add memory usage monitor: monitor_memory_usage() - Compare mesh hash in interpolate_vertex_values - Add hash() for Mesh and MeshTopology - Expose GenericVector::operator{+=,-=,+,-}(double) to Python - Add function Function::compute_vertex_values not needing a mesh argument - Add support for XDMF and HDF5 - Add new interface LinearOperator for matrix-free linear systems - Remove MTL4 linear algebra backend - Rename down_cast --> as_type in C++ / as_backend_type in Python - Remove KrylovMatrix interface - Remove quadrature classes - JIT compiled C++ code can now include a dolfin namespace - Expression string parsing now understand 
C++ namespace such as std::cosh - Fix bug in Expression so one can pass min, max - Fix bug in SystemAssembler, where mesh.init(D-1, D) was not called before assemble - Fix bug where the reference count of Py_None was not increased - Fix bug in reading TimeSeries of size smaller than 3 - Improve code design for Mesh FooIterators to avoid dubious down cast - Bug fix in destruction of PETSc user preconditioners - Add CellVolume(mesh) convenience wrapper to Python interface for UFL function - Fix bug in producing outward pointing normals of BoundaryMesh - Fix bug introduced by SWIG 2.0.5, where typemaps of templated typedefs are not handled correctly - Fix bug introduced by SWIG 2.0.5, which treated uint as Python long - Add check that sample points for TimeSeries are monotone - Fix handling of parameter "report" in Krylov solvers - Add new linear algebra backend "PETScCusp" for GPU-accelerated linear algebra - Add sparray method in the Python interface of GenericMatrix, requires scipy.sparse - Make methods that return a view of contiguous c-arrays, via a NumPy array, keep a reference from the object so it wont get out of scope - Add parameter: "use_petsc_signal_handler", which enables/disable PETSc system signals - Avoid unnecessary resize of result vector for A*b - MPI functionality for distributing values between neighbours - SystemAssembler now works in parallel with topological/geometric boundary search - New symmetric assembler with ability for stand-alone RHS assemble - Major speed-up of DirichletBC computation and mesh marking - Major speed-up of assembly of functions and expressions - Major speed-up of mesh topology computation - Add simple 2D and 3D mesh generation (via CGAL) - Add creation of mesh from triangulations of points (via CGAL) - Split the SWIG interface into six combined modules instead of one - Add has_foo to easy check what solver and preconditioners are available - Add convenience functions for listing available linear_algebra_backends - Change 
naming convention for cpp unit tests test.cpp -> Foo.cpp - Added cpp unit test for GenericVector::operator{-,+,*,/}= for all la backends - Add functionality for rotating meshes - Add mesh generation based on NETGEN constructive solid geometry - Generalize SparsityPattern and STLMatrix to support column-wise storage - Add interfaces to wrap PaStiX and MUMPS direct solvers - Add CoordinateMatrix class - Make STLMatrix work in parallel - Remove all tr1::tuple and use boost::tuple - Fix wrong link in Python quick reference. 1.0.0 [2011-12-07] ------------------ - Change return value of IntervalCell::facet_area() 0.0 --> 1.0. - Recompile all forms with FFC 1.0.0 - Fix for CGAL 3.9 on OS X - Improve docstrings for Box and Rectangle - Check number of dofs on local patch in extrapolation 1.0-rc2 [2011-11-28] -------------------- - Fix bug in 1D mesh refinement - Fix bug in handling of subdirectories for TimeSeries - Fix logic behind vector assignment, especially in parallel 1.0-rc1 [2011-11-21] -------------------- - 33 bugs fixed - Implement traversal of bounding box trees for all codimensions - Edit and improve all error messages - Added [un]equality operator to FunctionSpace - Remove batch compilation of Expression (Expressions) from Python interface - Added get_value to MeshValueCollection - Added assignment operator to MeshValueCollection 1.0-beta2 [2011-10-26] ---------------------- - Change search path of parameter file to ~/.fenics/dolfin_parameters.xml - Add functions Parameters::has_parameter, Parameters::has_parameter_set - Added option to store all connectivities in a mesh for TimeSeries (false by default) - Added option for gzip compressed binary files for TimeSeries - Propagate global parameters to Krylov and LU solvers - Fix OpenMp assemble of scalars - Make OpenMP assemble over sub domains work - DirichletBC.get_boundary_values, FunctionSpace.collapse now return a dict in Python - Changed name of has_la_backend to has_linear_algebra_backend - Added has_foo 
functions which can be used instead of the HAS_FOO defines - Less strict check on kwargs for compiled Expression - Add option to not right-justify tables - Rename summary --> list_timings - Add function list_linear_solver_methods - Add function list_lu_solver_methods - Add function list_krylov_solver_methods - Add function list_krylov_solver_preconditioners - Support subdomains in SystemAssembler (not for interior facet integrals) - Add option functionality apply("flush") to PETScMatrix - Add option finalize_tensor=true to assemble functions - Solver parameters can now be passed to solve - Remove deprecated function Variable::disp() - Remove deprecated function logging() - Add new class MeshValueCollection - Add new class MeshDomains replacing old storage of boundary markers as part of MeshData. The following names are no longer supported: - boundary_facet_cells - boundary_facet_numbers - boundary_indicators - material_indicators - cell_domains - interior_facet_domains - exterior_facet_domains - Rename XML tag <mesh_function> --> <mesh_value_collection> - Rename SubMesh data "global_vertex_indices" --> "parent_vertex_indices" - Get XML input/output of boundary markers working again - Get FacetArea working again 1.0-beta [2011-08-11] --------------------- - Print percentage of non-zero entries when computing sparsity patterns - Use ufl.Real for Constant in Python interface - Add Dirichlet boundary condition argument to Python project function - Add remove functionality for parameter sets - Added out typemap for vector of shared_ptr objects - Fix typemap bug for list of shared_ptr objects - Support parallel XML vector io - Add support for gzipped XML output - Use pugixml for XML output - Move XML SAX parser to libxml2 SAX2 interface - Simplify XML io - Change interface for variational problems, class VariationalProblem removed - Add solve interface: solve(a == L), solve(F == 0) - Add new classes Linear/NonlinearVariationalProblem - Add new classes Linear/NonlinearVariationalSolver - Add form class aliases 
ResidualForm and Jacobian form in wrapper code - Default argument to variables in Expression are passed as kwargs in the Python interface - Add has_openmp as utility function in Python interface - Add improved error reporting using dolfin_error - Use Boost to compute Legendre polynolials - Remove ode code - Handle parsing of unrecognized command-line parameters - All const std::vector& now return a read-only NumPy array - Make a robust macro for generating a NumPy array from data - Exposing low level fem functionality to Python, by adding a Cell -> ufc::cell typemap - Added ufl_cell as a method to Mesh in Python interface - Fix memory leak in Zoltan interface - Remove some 'new' for arrays in favour of std::vector - Added cell as an optional argument to Constant - Prevent the use of non contiguous NumPy arrays for most typemaps - Point can now be used to evaluate a Function or Expression in Python - Fixed dimension check for Function and Expression eval in Python - Fix compressed VTK output for tensors in 2D 0.9.11 [2011-05-16] ------------------- - Change license from LGPL v2.1 to LGPL v3 or later - Moved meshconverter to dolfin_utils - Add support for conversion of material markers for Gmsh meshes - Add support for point sources (class PointSource) - Rename logging --> set_log_active - Add parameter "clear_on_write" to TimeSeries - Add support for input/output of nested parameter sets - Check for dimensions in linear solvers - Add support for automated error control for variational problems - Add support for refinement of MeshFunctions after mesh refinement - Change order of test and trial spaces in Form constructors - Make SWIG version >= 2.0 a requirement - Recognize subdomain data in Assembler from both Form and Mesh - Add storage for subdomains (cell_domains etc) in Form class - Rename MeshData "boundary facet cells" --> "boundary_facet_cells" - Rename MeshData "boundary facet numbers" --> "boundary_facet_numbers" - Rename MeshData "boundary indicators" --> 
"boundary_indicators" - Rename MeshData "exterior facet domains" --> "exterior_facet_domains" - Updates for UFC 2.0.1 - Add FiniteElement::evaluate_basis_derivatives_all - Add support for VTK output of facet-based MeshFunctions - Change default log level from PROGRESS to INFO - Add copy functions to FiniteElement and DofMap - Simplify DofMap - Interpolate vector values when reading from time series 0.9.10 [2011-02-23] ------------------- - Updates for UFC 2.0.0 - Handle TimeSeries stored backward in time (automatic reversal) - Automatic storage of hierarchy during refinement - Remove directory/library 'main', merged into 'common' - dolfin_init --> init, dolfin_set_precision --> set_precision - Remove need for mesh argument to functional assembly when possible - Add function set_output_stream - Add operator () for evaluation at points for Function/Expression in C++ - Add abs() to GenericVector interface - Fix bug for local refinement of manifolds - Interface change: VariationalProblem now takes: a, L or F, (dF) - Map linear algebra objects to processes consistently with mesh partition - Lots of improvements to parallel assembly, dof maps and linear algebra - Add lists supported_elements and supported_elements_for_plotting in Python - Add script dolfin-plot for plotting meshes and elements from the command-line - Add support for plotting elements from Python - Add experimental OpenMP assembler - Thread-safety fixes in Function class - Make GenericFunction::eval thread-safe (Data class removed) - Optimize and speedup topology computation (mesh.init()) - Add function Mesh::clean() for cleaning out auxiliary topology data - Improve speed and accuracy of timers - Fix bug in 3D uniform mesh refinement - Add built-in meshes UnitTriangle and UnitTetrahedron - Only create output directories when they don't exist - Make it impossible to set the linear algebra backend to something illegal - Overload value_shape instead of dim for user-defined Python Expressions - Permit unset 
parameters - Search only for BLAS library (not cblas.h) 0.9.9 [2010-09-01] ------------------ - Change build system to CMake - Add named MeshFunctions: VertexFunction, EdgeFunction, FaceFunction, FacetFunction, CellFunction - Allow setting constant boundary conditions directly without using Constant - Allow setting boundary conditions based on string ("x[0] == 0.0") - Create missing directories if specified as part of file names - Allow re-use of preconditioners for most backends - Fixes for UMFPACK solver on some 32 bit machines - Provide access to more Hypre preconditioners via PETSc - Updates for SLEPc 3.1 - Improve and implement re-use of LU factorizations for all backends - Fix bug in refinement of MeshFunctions 0.9.8 [2010-07-01] ------------------ - Optimize and improve StabilityAnalysis. - Use own implementation of binary search in ODESolution (takes advantage of previous values as initial guess) - Improve reading ODESolution spanning multiple files - Dramatic speedup of progress bar (and algorithms using it) - Fix bug in writing meshes embedded higher dimensions to M-files - Zero vector in uBLASVector::resize() to fix spurious bug in Krylov solver - Handle named fields (u.rename()) in VTK output - Bug fix in computation of FacetArea for tetrahedrons - Add support for direct plotting of Dirichlet boundary conditions: plot(bc) - Updates for PETSc 3.1 - Add relaxation parameter to NewtonSolver - Implement collapse of renumbered dof maps (serial and parallel) - Simplification of DofMapBuilder for parallel dof maps - Improve and simplify DofMap - Add Armadillo dependency for dense linear algebra - Remove LAPACKFoo wrappers - Add abstract base class GenericDofMap - Zero small values in VTK output to avoid VTK crashes - Handle MeshFunction/markers in homogenize bc - Make preconditioner selectable in VariationalProblem (new parameter) - Read/write meshes in binary format - Add parameter "use_ident" in DirichletBC - Issue error by default when solvers don't 
converge (parameter "error_on_convergence") - Add option to print matrix/vector for a VariationalProblem - Trilinos backend now works in parallel - Remove Mesh refine member functions. Use free refine(...) functions instead - Remove AdaptiveObjects - Add Stokes demo using the MINI element - Interface change: operator+ now used to denote enriched function spaces - Interface change: operator+ --> operator* for mixed elements - Add option 'allow_extrapolation' useful when interpolating to refined meshes - Add SpatialCoordinates demo - Add functionality for accessing time series sample times: vector_times(), mesh_times() - Add functionality for snapping mesh to curved boundaries during refinement - Add functionality for smoothing the boundary of a mesh - Speedup assembly over exterior facets by not using BoundaryMesh - Mesh refinement improvements, remove unnecessary copying in Python interface - Clean PETSc and Epetra Krylov solvers - Add separate preconditioner classes for PETSc and Epetra solvers - Add function ident_zeros for inserting one on diagonal for zero rows - Add LU support for Trilinos interface 0.9.7 [2010-02-17] ------------------ - Add support for specifying facet orientation in assembly over interior facets - Allow user to choose which LU package PETScLUSolver uses - Add computation of intersection between arbitrary mesh entities - Random access to MeshEntityIterators - Modify SWIG flags to prevent leak when using SWIG director feature - Fix memory leak in std::vector typemaps - Add interface for SCOTCH for parallel mesh partitioning - Bug fix in SubDomain::mark, fixes bug in DirichletBC based on SubDomain::inside - Improvements in time series class, recognizing old stored values - Add FacetCell class useful in algorithms iterating over boundary facets - Rename reconstruct --> extrapolate - Remove GTS dependency 0.9.6 [2010-02-03] ------------------ - Simplify access to form compiler parameters, now integrated with global parameters - Add DofMap 
member function to return set of dofs - Fix memory leak in the LA interface - Do not import cos, sin, exp from NumPy to avoid clash with UFL functions - Fix bug in MTL4Vector assignment - Remove sandbox (moved to separate repository) - Remove matrix factory (dolfin/mf) - Update .ufl files for changes in UFL - Added swig/import/foo.i for easy type importing from dolfin modules - Allow optional argument cell when creating Expression - Change name of Expression argument cpparg --> cppcode - Add simple constructor (dim0, dim1) for C++ matrix Expressions - Add example demonstrating the use of cpparg (C++ code in Python) - Add least squares solver for dense systems (wrapper for DGELS) - New linear algebra wrappers for LAPACK matrices and vectors - Experimental support for reconstruction of higher order functions - Modified interface for eval() and inside() in C++ using Array - Introduce new Array class for simplified wrapping of arrays in SWIG - Improved functionality for intersection detection - Re-implementation of intersection detection using CGAL 0.9.5 [2009-12-03] ------------------ - Set appropriate parameters for symmetric eigenvalue problems with SLEPc - Fix for performance regression in recent uBLAS releases - Simplify Expression interface: f = Expression("sin(x[0])") - Simplify Constant interface: c = Constant(1.0) - Fix bug in periodic boundary conditions - Add simple script dolfin-tetgen for generating DOLFIN XML meshes from STL - Make XML parser append/overwrite parameter set when reading parameters from file - Refinement of function spaces and automatic interpolation of member functions - Allow setting global parameters for Krylov solver - Fix handling of Constants in Python interface to avoid repeated JIT compilation - Allow simple specification of subdomains in Python without needing to subclass SubDomain - Add function homogenize() for simple creation of homogeneous BCs from given BCs - Add copy constructor and possibility to change value for DirichletBC 
- Add simple wrapper for ufl.cell.n. FacetNormal(mesh) now works again in Python. - Support apply(A), apply(b) and apply(b, x) in PeriodicBC - Enable setting spectral transformation for SLEPc eigenvalue solver 0.9.4 [2009-10-12] ------------------ - Remove set, get and operator() methods from MeshFunction - Added const and none const T &operator[uint/MeshEntity] to MeshFunction - More clean up in SWIG interface files, remove global renames and ignores - Update Python interface to Expression, with extended tests for value ranks - Removed DiscreteFunction class - Require value_shape and geometric_dimension in Expression - Introduce new class Expression replacing user-defined Functions - interpolate_vertex_values --> compute_vertex_values - std::map replaces generated CoefficientSet code - Cleanup logic in Function class as a result of new Expression class - Introduce new Coefficient base class for form coefficients - Replace CellSize::min,max by Mesh::hmin,hmax - Use MUMPS instead of UMFPACK as default direct solver in both serial and parallel - Fix bug in SystemAssembler - Remove support for PETSc 2.3 and support PETSc 3.0.0 only - Remove FacetNormal Function. Use UFL facet normal instead. 
- Add update() function to FunctionSpace and DofMap for use in adaptive mesh refinement - Require mesh in constructor of functionals (C++) or argument to assemble (Python) 0.9.3 [2009-09-25] ------------------ - Add global parameter "ffc_representation" for form representation in FFC JIT compiler - Make norm() function handle both vectors and functions in Python - Speedup periodic boundary conditions and make work for mixed (vector-valued) elements - Add possibilities to use any number numpy array when assigning matrices and vectors - Add possibilities to use any integer numpy array for indices in matrices and vectors - Fix for int typemaps in PyDOLFIN - Split mult into mult and transpmult - Filter out PETSc argument when parsing command-line parameters - Extend comments to SWIG interface files - Add copyright statements to SWIG interface files (not finished yet) - Add typemaps for misc std::vector in PyDOLFIN - Remove dependencies on std_vector.i reducing SWIG wrapper code size - Use relative %includes in dolfin.i - Changed names on SWIG interface files dolfin_foo.i -> foo.i - Add function interpolate() in Python interface - Fix typmaps for uint in python 2.6 - Use TypeError instead of ValueError in typechecks in typmaps.i - Add in/out shared_ptr typemaps for PyDOLFIN - Fix JIT compiling in parallel - Add a compile_extension_module function in PyDOLFIN - Fix bug in Python vector assignment - Add support for compressed base64 encoded VTK files (using zlib) - Add support for base64 encoded VTK files - Experimental support for parallel assembly and solve - Bug fix in project() function, update to UFL syntax - Remove disp() functions and replace by info(foo, true) - Add fem unit test (Python) - Clean up SystemAssembler - Enable assemble_system through PyDOLFIN - Add 'norm' to GenericMatrix - Efficiency improvements in NewtonSolver - Rename NewtonSolver::get_iteration() to NewtonSolver::iteration() - Improvements to EpetraKrylovSolver::solve - Add constructor 
Vector::Vector(const GenericVector& x) - Remove SCons deprecation warnings - Memory leak fix in PETScKrylovSolver - Rename dolfin_assert -> assert and use C++ version - Fix debug/optimise flags - Remove AvgMeshSize, InvMeshSize, InvFacetArea from SpecialFunctions - Rename MeshSize -> CellSize - Rewrite parameter system with improved support for command-line parsing, localization of parameters (per class) and usability from Python - Remove OutflowFacet from SpecialFunctions - Rename interpolate(double*) --> interpolate_vertex_values(double*) - Add Python version of Cahn-Hilliard demo - Fix bug in assemble.py - Permit interpolation of functions between non-matching meshes - Remove Function::Function(std::string filename) - Transition to new XML io - Remove GenericSparsityPattern::sort - Require sorted/unsorted parameter in SparsityPattern constructor - Improve performance of SparsityPattern::insert - Replace enums with strings for linear algebra and built-in meshes - Allow direct access to Constant value - Initialize entities in MeshEntity constructor automatically and check range - Add unit tests to the memorycheck - Add call to clean up libxml2 parser at exit - Remove unecessary arguments in DofMap member functions - Remove reference constructors from DofMap, FiniteElement and FunctionSpace - Use a shared_ptr to store the mesh in DofMap objects - Interface change for wrapper code: PoissonBilinearForm --> Poisson::BilinearForm - Add function info_underline() for writing underlined messages - Rename message() --> info() for "compatibility" with Python logging module - Add elementwise multiplication in GeneriVector interface - GenericVector interface in PyDOLFIN now support the sequence protocol - Rename of camelCaps functions names: fooBar --> foo_bar Note: mesh.numVertices() --> mesh.num_vertices(), mesh.numCells() --> mesh.num_cells() - Add slicing capabilities for GenericMatrix interface in PyDOLFIN (only getitem) - Add slicing capabilities for GenericVector 
interface in PyDOLFIN - Add sum to GenericVector interface 0.9.2 [2009-04-07] ------------------ - Enable setting parameters for Newton solver in VariationalProblem - Simplified and improved implementation of C++ plotting, calling Viper on command-line - Remove precompiled elements and projections - Automatically interpolate user-defined functions on assignment - Add new built-in function MeshCoordinates, useful in ALE simulations - Add new constructor to Function class, Function(V, "vector.xml") - Remove class Array (using std::vector instead) - Add vector_mapping data to MeshData - Use std::vector instead of Array in MeshData - Add assignment operator and copy constructor for MeshFunction - Add function mesh.move(other_mesh) for moving mesh according to matching mesh (for FSI) - Add function mesh.move(u) for moving mesh according to displacement function (for FSI) - Add macro dolfin_not_implemented() - Add new interpolate() function for interpolation of user-defined function to discrete - Make _function_space protected in Function - Added access to crs data from python for uBLAS and MTL4 backend 0.9.1 [2009-02-17] ------------------ - Check Rectangle and Box for non-zero dimensions - ODE solvers now solve the dual problem - New class SubMesh for simple extraction of matching meshes for sub domains - Improvements of multiprecision ODE solver - Fix Function class copy constructor - Bug fixes for errornorm(), updates for new interface - Interface update for MeshData: createMeshFunction --> create_mesh_function etc - Interface update for Rectangle and Box - Add elastodynamics demo - Fix memory leak in IntersectionDetector/GTSInterface - Add check for swig version, in jit and compile functions - Bug fix in dolfin-order script for gzipped files - Make shared_ptr work across C++/Python interface - Replace std::tr1::shared_ptr with boost::shared_ptr - Bug fix in transfinite mean-value interpolation - Less annoying progress bar (silent when progress is fast) - Fix 
assignment operator for MeshData - Improved adaptive mesh refinement (recursive Rivara) producing better quality meshes 0.9.0 [2009-01-05] ------------------ - Cross-platform fixes - PETScMatrix::copy fix - Some Trilinos fixes - Improvements in MeshData class - Do not use initial guess in Newton solver - Change OutflowFacet to IsOutflowFacet and change syntax - Used shared_ptr for underlying linear algebra objects - Cache subspaces in FunctionSpace - Improved plotting, now support plot(grad(u)), plot(div(u)) etc - Simple handling of JIT-compiled functions - Sign change (bug fix) in increment for Newton solver - New class VariationalProblem replacing LinearPDE and NonlinearPDE - Parallel parsing and partitioning of meshes (experimental) - Add script dolfin-order for ordering mesh files - Add new class SubSpace (replacing SubSystem) - Add new class FunctionSpace - Complete redesign of Function class hierarchy, now a single Function class - Increased use of shared_ptr in Function, FunctionSpace, etc - New interface for boundary conditions, form not necessary - Allow simple setting of coefficient functions based on names (not their index) - Don't order mesh automatically, meshes must now be ordered explicitly - Simpler definition of user-defined functions (constructors not necessary) - Make mesh iterators const to allow for const-correct Mesh code 0.8.1 [2008-10-20] ------------------ - Add option to use ML multigrid preconditioner through PETSc - Interface change for ODE solvers: uBLASVector --> double* - Remove homotopy solver - Remove typedef real, now using plain double instead - Add various operators -=, += to GenericMatrix - Don't use -Werror when compiling SWIG generated code - Remove init(n) and init(m, n) from GenericVector/Matrix. 
Use resize and zero instead - Add new function is_compatible() for checking compatibility of boundary conditions - Use x as initial guess in Krylov solvers (PETSc, uBLAS, ITL) - Add new function errornorm() - Add harmonic ALE mesh smoothing - Refinements of Graph class - Add CholmodCholeskySolver (direct solver for symmetric matrices) - Implement application of Dirichlet boundary conditions within assembly loop - Improve efficiency of SparsityPattern - Allow a variable number of smoothings - Add class Table for pretty-printing of tables - Add experimental MTL4 linear algebra backend - Add OutflowFacet to SpecialFunctions for DG transport problems - Remove unmaintained OpenDX file format - Fix problem with mesh smoothing near nonconvex corners - Simple projection of functions in Python - Add file format: XYZ for use with Xd3d - Add built-in meshes: UnitCircle, Box, Rectangle, UnitSphere 0.8.0 [2008-06-23] ------------------ - Fix input of matrix data from XML - Add function normalize() - Integration with VMTK for reading DOLFIN XML meshes produced by VMTK - Extend mesh XML format to handle boundary indicators - Add support for attaching arbitrarily named data to meshes - Add support for dynamically choosing the linear algebra backend - Add Epetra/Trilinos linear solvers - Add setrow() to matrix interface - Add new solver SingularSolver for solving singular (pressure) systems - Add MeshSize::min(), max() for easy computation of smallest/largest mesh size - LinearSolver now handles all backends and linear solvers - Add access to normal in Function, useful for inflow boundary conditions - Remove GMRES and LU classes, use solve() instead - Improve solve() function, now handles both LU and Krylov + preconditioners - Add ALE mesh interpolation (moving mesh according to new boundary coordinates) 0.7.3 [2008-04-30] ------------------ - Add support for Epetra/Trilinos - Bug fix for order of values in interpolate_vertex_values, now according to UFC - Boundary meshes are now 
always oriented with respect to outward facet normals - Improved linear algebra, both in C++ and Python - Make periodic boundary conditions work in Python - Fix saving of user-defined functions - Improve plotting - Simple computation of various norms of functions from Python - Evaluation of Functions at arbitrary points in a mesh - Fix bug in assembling over exterior facets (subdomains were ignored) - Make progress bar less annoying - New scons-based build system replaces autotools - Fix bug when choosing iterative solver from Python 0.7.2 [2008-02-18] ------------------ - Improve sparsity pattern generator efficiency - Dimension-independent sparsity pattern generator - Add support for setting strong boundary values for DG elements - Add option setting boundary conditions based on geometrical search - Check UMFPACK return argument for warnings/errors - Simplify setting simple Dirichlet boundary conditions - Much improved integration with FFC in PyDOLFIN - Caching of forms by JIT compiler now works - Updates for UFC 1.1 - Catch exceptions in PyDOLFIN - Work on linear algebra interfaces GenericTensor/Matrix/Vector - Add linear algebra factory (backend) interface - Add support for 1D meshes - Make Assembler independent of linear algebra backend - Add manager for handling sub systems (PETSc and MPI) - Add parallel broadcast of Mesh and MeshFunction - Add experimental support for parallel assembly - Use PETSc MPI matrices when running in parallel - Add predefined functions FacetNormal and AvgMeshSize - Add left/right/crisscross options for UnitSquare - Add more Python demos - Add support for Exodus II format in dolfin-convert - Autogenerate docstrings for PyDOLFIN - Various small bug fixes and improvements 0.7.1 [2007-08-31] ------------------ - Integrate FFC form language into PyDOLFIN - Just-in-time (JIT) compilation of variational forms - Conversion from Diffpack grid format to DOLFIN XML - Name change: BoundaryCondition --> DirichletBC - Add support for 
periodic boundary conditions: class PeriodicBC - Redesign default linear algebra interface (Matrix, Vector, KrylovSolver, etc) - Add function to return Vector associated with a DiscreteFunction 0.7.0-1 [2007-06-22] -------------------- - Recompile all forms with latest FFC release - Remove typedefs SparseMatrix and SparseVector - Fix includes in LinearPDE - Rename DofMaps -> DofMapSet 0.7.0 [2007-06-20] ------------------ - Move to UFC interface for code generation - Major rewrite, restructure, cleanup - Add support for Brezzi-Douglas-Marini (BDM) elements - Add support for Raviart-Thomas (RT) elements - Add support for Discontinuous Galerkin (DG) methods - Add support for mesh partitioning (through SCOTCH) - Handle both UMFPACK and UFSPARSE - Local mesh refinement - Mesh smoothing - Built-in plotting (through Viper) - Cleanup log system - Numerous fixes for mesh, in particular MeshFunction - Much improved Python bindings for mesh - Fix Python interface for vertex and cell maps in boundary computation 0.6.4 [2006-12-01] ------------------ - Switch from Python Numeric to Python NumPy - Improved mesh Python bindings - Add input/output support for MeshFunction - Change Mesh::vertices() --> Mesh::coordinates() - Fix bug in output of mesh to MATLAB format - Add plasticty module (experimental) - Fix configure test for Python dev (patch from Åsmund Ødegård) - Add mesh benchmark - Fix memory leak in mesh (data not deleted correctly in MeshTopology) - Fix detection of curses libraries - Remove Tecplot output format 0.6.3 [2006-10-27] ------------------ - Move to new mesh library - Remove dolfin-config and move to pkg-config - Remove unused classes PArray, PList, Table, Tensor - Visualization of 2D solutions in OpenDX is now supported (3D supported before) - Add support for evaluation of functionals - Fix bug in Vector::sum() for uBLAS vectors 0.6.2-1 [2006-09-06] -------------------- - Fix compilation error when using --enable-petsc (dolfin::uBLASVector::PETScVector 
undefined) 0.6.2 [2006-09-05] ------------------ - Finish chapter in manual on linear algebra - Enable PyDOLFIN by default, use --disable-pydolfin to disable - Disable PETSc by default, use --enable-petsc to enable - Modify ODE solver interface for u0() and f() - Add class ConvectionMatrix - Readd classes LoadVector, MassMatrix, StiffnessMatrix - Add matrix factory for simple creation of standard finite element matrices - Collect static solvers in LU and GMRES - Bug fixes for Python interface PyDOLFIN - Enable use of direct solver for ODE solver (experimental) - Remove demo bistable - Restructure and cleanup linear algebra - Use UMFPACK for LU solver with uBLAS matrix types - Add templated wrapper class for different uBLAS matrix types - Add ILU preconditioning for uBLAS matrices - Add Krylov solver for uBLAS sparse matrices (GMRES and BICGSTAB) - Add first version of new mesh library (NewMesh, experimental) - Add Parametrized::readParameters() to trigger reading of values on set() - Remove output of zeros in Octave matrix file format - Use uBLAS-based vector for Vector if PETSc disabled - Add wrappers for uBLAS compressed_matrix class - Compute eigenvalues using SLEPc (an extension of PETSc) - Clean up assembly and linear algebra - Add function to solve Ax = b for dense matrices and dense vectors - Make it possible to compile without PETSc (--disable-petsc) - Much improved ODE solvers - Complete multi-adaptive benchmarks reaction and wave - Assemble boundary integrals - FEM class cleaned up. 
- Fix multi-adaptive benchmark problem reaction - Small fixes for Intel C++ compiler version 9.1 - Test for Intel C++ compiler and configure appropriately - Add new classes DenseMatrix and DenseVector (wrappers for ublas) - Fix bug in conversion from Gmsh format 0.6.1 [2006-03-28] ------------------ - Regenerate build system in makedist script - Update for new FFC syntax: BasisFunction --> TestFunction, TrialFunction - Fixes for conversion script dolfin-convert - Initial cleanups and fixes for ODE solvers - Numerous small fixes to improve portability - Remove dolfin:: qualifier on output << in Parameter.h - Don't use anonymous classes in demos, gives errors with some compilers - Remove KrylovSolver::solver() - Fix bug in convection-diffusion demo (boundary condition for pressure), use direct solver - LinearPDE and NewtonSolver use umfpack LU solver by default (if available) when doing direct solve - Set PETSc matrix type through Matrix constructor - Allow linear solver and preconditioner type to be passed to NewtonSolver - Fix bug in Stokes demos (wrong boundary conditions) - Cleanup Krylov solver - Remove KrylovSolver::setPreconditioner() etc. and move to constructors - Remove KrylovSolver::setRtol() etc. 
and replace with parameters - Fix remaining name changes: noFoo() --> numFoo() - Add Cahn-Hilliard equation demo - NewtonSolver option to use residual or incremental convergence criterion - Add separate function to nls to test for convergence of Newton iterations - Fix bug in dolfin-config (wrong version number) 0.6.0 [2006-03-01] ------------------ - Fix bug in XML output format (writing multiple objects) - Fix bug in XML matrix output format (handle zero rows) - Add new nonlinear PDE demo - Restructure PDE class to use envelope-letter design - Add precompiled finite elements for q <= 5 - Add FiniteElementSpec and factor function for FiniteElement - Add input/output of Function to DOLFIN XML - Name change: dof --> node - Name change: noFoo() --> numFoo() - Add conversion from gmsh format in dolfin-convert script - Updates for PETSc 2.3.1 - Add new type of Function (constant) - Simplify use of Function class - Add new demo Stokes + convection-diffusion - Add new demo Stokes (equal-order stabilized) - Add new demo Stokes (Taylor-Hood) - Add new parameter for KrylovSolvers: "monitor convergence" - Add conversion script dolfin-convert for various mesh formats - Add new demo elasticity - Move poisson demo to src/demo/pde/poisson - Move to Mercurial (hg) from CVS - Use libtool to build libraries (including shared) 0.5.12 [2006-01-12] ------------------- - Make Stokes solver dimension independent (2D/3D) - Make Poisson solver dimension independent (2D/3D) - Fix sparse matrix output format for MATLAB - Modify demo problem for Stokes, add exact solution and compute error - Change interface for boundary conditions: operator() --> eval() - Add two benchmark problems for the Navier-Stokes solver - Add support for 2D/3D selection in Navier-Stokes solver - Move tic()/toc() to timing.h - Navier-Stokes solver back online - Make Solver a subclass of Parametrized - Add support for localization of parameters - Redesign of parameter system 0.5.11 [2005-12-15] ------------------- - 
Add script monitor for monitoring memory usage - Remove meminfo.h (not portable) - Remove dependence on parameter system in log system - Don't use drand48() (not portable) - Don't use strcasecmp() (not portable) - Remove sysinfo.h and class System (not portable) - Don't include (not portable) - Change ::show() --> ::disp() everywhere - Clean out old quadrature classes on triangles and tetrahedra - Clean out old sparse matrix code - Update chapter on Functions in manual - Use std::map to store parameters - Implement class KrylovSolver - Name change: Node --> Vertex - Add nonlinear solver demos - Add support for picking sub functions and components of functions - Update interface for FiniteElement for latest FFC version - Improve and restructure implementation of the Function class - Dynamically adjust safety factor during integration - Improve output Matrix::disp() - Check residual at end of time step, reject step if too large - Implement Vector::sum() - Implement nonlinear solver - New option for ODE solver: "save final solution" --> solution.data - New ODE test problem: reaction - Fixes for automake 1.9 (nobase_include_HEADERS) - Reorganize build system, remove fake install and require make install - Add checks for non-standard PETSc component HYPRE in NSE solver - Make GMRES solver return the number of iterations - Add installation script for Python interface - Add Matrix Market format (Haiko Etzel) - Automatically reinitialize GMRES solver when system size changes - Implement cout << for class Vector 0.5.10 [2005-10-11] ------------------- - Modify ODE solver interface: add T to constructor - Fix compilation on AMD 64 bit systems (add -fPIC) - Add new BLAS mode for form evaluation - Change enum types in File to lowercase - Change default file type for .m to Octave - Add experimental Python interface PyDOLFIN - Fix compilation for gcc 4.0 0.5.9 [2005-09-23] ------------------ - Add Stokes module - Support for arbitrary mixed elements through FFC - VTK output 
interface now handles time-dependent functions automatically - Fix cout for empty matrix - Change dolfin_start() --> dolfin_end() - Add chapters to manual: about, log system, parameters, reference elements, installation, contributing, license - Use new template fenicsmanual.cls for manual - Add compiler flag -U__STRICT_ANSI__ when compiling under Cygwin - Add class EigenvalueSolver 0.5.8 [2005-07-05] ------------------ - Add new output format Paraview/VTK (Garth N. Wells) - Update Tecplot interface - Move to PETSc 2.3.0 - Complete support for general order Lagrange elements in triangles and tetrahedra - Add test problem in src/demo/fem/convergence/ for general Lagrange elements - Make FEM::assemble() estimate the number of nonzeros in each row - Implement Matrix::init(M, N, nzmax) - Add Matrix::nz(), Matrix::nzsum() and Matrix::nzmax() - Improve Mesh::disp() - Add FiniteElement::disp() and FEM::disp() (useful for debugging) - Remove old class SparseMatrix - Change FEM::setBC() --> FEM::applyBC() - Change Mesh::tetrahedrons --> Mesh::tetrahedra - Implement Dirichlet boundary conditions for tetrahedra - Implement Face::contains(const Point& p) - Add test for shape dimension of mesh and form in FEM::assemble() - Move src/demo/fem/ demo to src/demo/fem/simple/ - Add README file in src/demo/poisson/ (simple manual) - Add simple demo program src/demo/poisson/ - Update computation of alignment of faces to match FFC/FIAT 0.5.7 [2005-06-23] ------------------ - Clean up ODE test problems - Implement automatic detection of sparsity pattern from given matrix - Clean up homotopy solver - Implement automatic computation of Jacobian - Add support for assembly of non-square systems (Andy Terrel) - Make ODE solver report average number of iterations - Make progress bar write first update at 0% - Initialize all values of u before solution in multi-adaptive solver, not only components given by dependencies - Allow user to modify and verify a converging homotopy path - Make homotopy 
solver save a list of the solutions - Add Matrix::norm() - Add new test problem for CES economy - Remove cast from Parameter to const char* (use std::string) - Make solution data filename optional for homotopy solver - Append homotopy solution data to file during solution - Add dolfin::seed(int) for optionally seeding random number generator - Remove dolfin::max,min (use std::max,min) - Add polynomial-integer (true polynomial) form of general CES system - Compute multi-adaptive efficiency index - Updates for gcc 4.0 (patches by Garth N. Wells) - Add Matrix::mult(const real x[], uint row) (temporary fix, assumes uniprocessor case) - Add Matrix::mult(const Vector& x, uint row) (temporary fix, assumes uniprocessor case) - Update shortcuts MassMatrix and StiffnessMatrix to new system - Add missing friend to Face.h (reported by Garth N. Wells) 0.5.6 [2005-05-17] ------------------ - Implementation of boundary conditions for general order Lagrange (experimental) - Use interpolation function automatically generated by FFC - Put computation of map into class AffineMap - Clean up assembly - Use dof maps automatically generated by FFC (experimental) - Modify interface FiniteElement for new version of FFC - Update ODE homotopy test problems - Add cross product to class Point - Sort mesh entities locally according to ordering used by FIAT and FFC - Add new format for dof maps (preparation for higher-order elements) - Code cleanups: NewFoo --> Foo complete - Updates for new version of FFC (0.1.7) - Bypass log system when finalizing PETSc (may be out of scope) 0.5.5 [2005-04-26] ------------------ - Fix broken log system, curses works again - Much improved multi-adaptive time-stepping - Move elasticity module to new system based on FFC - Add boundary conditions for systems - Improve regulation of time steps - Clean out old assembly classes - Clean out old form classes - Remove kernel module map - Remove kernel module element - Move convection-diffusion module to new system based 
on FFC - Add iterators for cell neighbors of edges and faces - Implement polynomial form of CES economy - Rename all new linear algebra classes: NewFoo --> Foo - Clean out old linear algebra - Speedup setting of boundary conditions (add MAT_KEEP_ZEROED_ROWS) - Fix bug for option --disable-curses 0.5.4 [2005-03-29] ------------------ - Remove option to compile with PETSc 2.2.0 (2.2.1 required) - Make make install work again (fix missing includes) - Add support for mixing multiple finite elements (through FFC) - Improve functionality of homotopy solver - Simple creation of piecewise linear functions (without having an element) - Simple creation of piecewise linear elements - Add support of automatic creation of simple meshes (unit cube, unit square) 0.5.3 [2005-02-26] ------------------ - Change to PETSc version 2.2.1 - Add flag --with-petsc= to configure script - Move Poisson's equation to system based on FFC - Add support for automatic creation of homotopies - Make all ODE solvers automatically handle complex ODEs: (M) z' = f(z,t) - Implement version of mono-adaptive solver for implicit ODEs: M u' = f(u,t) - Implement Newton's method for multi- and mono-adaptive ODE solvers - Update PETSc wrappers NewVector, NewMatrix, and NewGMRES - Fix initialization of PETSc - Add mono-adaptive cG(q) and dG(q) solvers (experimental) - Implementation of new assembly: NewFEM, using output from FFC - Add access to mesh for nodes, cells, faces and edges - Add Tecplot I/O interface; contributed by Garth N. 
Wells 0.5.2 [2005-01-26] ------------------ - Benchmarks for DOLFIN vs PETSc (src/demo/form and src/demo/test) - Complete rewrite of the multi-adaptive ODE solver (experimental) - Add wrapper for PETSc GMRES solver - Update class Point with new operators - Complete rewrite of the multi-adaptive solver to improve performance - Add PETSc wrappers NewMatrix and NewVector - Add DOLFIN/PETSc benchmarks 0.5.1 [2004-11-10] ------------------ - Experimental support for automatic generation of forms using FFC - Allow user to supply Jacobian to ODE solver - Add optional test to check if a dependency already exists (Sparsity) - Modify sparse matrix output (Matrix::show()) - Add FGMRES solver in new format (patch from eriksv) - Add non-const version of quick-access of sparse matrices - Add linear mappings for simple computation of derivatives - Add check of matrix dimensions for ODE sparsity pattern - Include missing cmath in Function.cpp 0.5.0 [2004-08-18] ------------------ - First prototype of new form evaluation system - New classes Jacobi, SOR, Richardson (preconditioners and linear solvers) - Add integrals on the boundary (ds), partly working - Add maps from boundary of reference cell - Add evaluation of map from reference cell - New Matrix functions: max, min, norm, and sum of rows and columns (erik) - Derivatives/gradients of ElementFunction (coefficients f.ex.) 
implemented - Enable assignment to all elements of a NewArray - Add functions Boundary::noNodes(), noFaces(), noEdges() - New class GaussSeidel (preconditioner and linear solver) - New classes Preconditioner and LinearSolver - Bug fix for tetrahedral mesh refinement (ingelstrom) - Add iterators for Edge and Face on Boundary - Add functionality to Map: bdet() and cell() - Add connectivity face-cell and edge-cell - New interface for assembly: Galerkin --> FEM - Bug fix for PDE systems of size > 3 0.4.11 [2004-04-23] ------------------- - Add multigrid solver (experimental) - Update manual 0.4.10 ------ - Automatic model reduction (experimental) - Fix bug in ParticleSystem (divide by mass) - Improve control of integration (add function ODE::update()) - Load/save parameters in XML-format - Add assembly test - Add simple StiffnessMatrix, MassMatrix, and LoadVector - Change dK --> dx - Change dx() --> ddx() - Add support for GiD file format - Add performance tests for multi-adaptivity (both stiff and non-stiff) - First version of Newton for the multi-adaptive solver - Test for Newton for the multi-adaptive solver 0.4.9 ----- - Add multi-adaptive solver for the bistable equation - Add BiCGSTAB solver (thsv) - Fix bug in SOR (thsv) - Improved visual program for OpenDX - Fix OpenDX file format for scalar functions - Allow access to samples of multi-adaptive solution - New patch from thsv for gcc 3.4.0 and 3.5.0 - Make progress step a parameter - New function ODE::sparse(const Matrix& A) - Access nodes, cells, edges, faces by id - New function Matrix::lump() 0.4.8 ----- - Add support for systems (jansson and bengzon) - Add new module wave - Add new module wave-vector - Add new module elasticity - Add new module elasticity-stationary - Multi-adaptive updates - Fix compilation error in LogStream - Fix local Newton iteration for higher order elements - Init matrix to given type - Add output of cG(q) and dG(q) weights in matrix format - Fix numbering of frames from plotslab 
script - Add png output for plotslab script - Add script for running stiff test problems, plot solutions - Fix bug in MeshInit (node neighbors of node) - Modify output of sysinfo() - Compile with -Wall -Werror -pedantic -ansi -std=c++98 (thsv) 0.4.7 ----- - Make all stiff test problems work - Display status report also when using step() - Improve adaptive damping for stiff problems (remove spikes) - Modify Octave/Matlab format for solution data (speed improvement) - Adaptive sampling of solution (optional) - Restructure stiff test problems - Check if value of right-hand side is valid - Modify divergence test in AdaptiveIterationLevel1 0.4.6 ----- - Save vectors and matrices from Matlab/Octave (foufas) - Rename writexml.m to xmlmesh.m - Inlining of important functions - Optimize evaluation of elements - Optimize Lagrange polynomials - Optimize sparsity: use stl containers - Optimize choice of discrete residual for multi-adaptive solver - Don't save solution in benchmark problem - Improve computation of divergence factor for underdamped systems - Don't check residual on first slab for fixed time step - Decrease largest (default) time step to 0.1 - Add missing in TimeStepper - Move real into dolfin namespace 0.4.5 ----- - Rename function.h to enable compilation under Cygwin - Add new benchmark problem for multi-adaptive solver - Bug fix for ParticleSystem - Initialization of first time step - Improve time step regulation (threshold) - Improve stabilization - Improve TimeStepper interface (Ko Project) - Use iterators instead of recursively calling TimeSlab::update() - Clean up ODESolver - Add iterators for elements in time slabs and element groups - Add -f to creation of symbolic links 0.4.4 ----- - Add support for 3D graphics in Octave using Open Inventor (jj) 0.4.3 ----- - Stabilization of multi-adaptive solver (experimental) - Improved non-support for curses (--disable-curses) - New class MechanicalSystem for simulating mechanical systems - Save debug info from 
primal and dual (plotslab.m) - Fix bug in progress bar - Add missing include file in Components.h (kakr) - New function dolfin_end(const char* msg, ...) - Move numerical differentiation to RHS - New class Event for limited display of messages - Fix bug in LogStream (large numbers in floating point format) - Specify individual time steps for different components - Compile without warnings - Add -Werror to option enable-debug - Specify individual methods for different components - Fix bug in dGqMethods - Fix bug (delete old block) in ElementData - Add parameters for method and order - New test problem reaction - New class FixedPointIteration - Fix bug in grid refinement 0.4.2 ----- - Fix bug in computation of residual (divide by k) - Add automatic generation and solution of the dual problem - Automatic selection of file names for primal and dual - Fix bug in progress bar (TerminalLogger) - Many updates of multi-adaptive solver - Add class ODEFunction - Update function class hierarchies - Move functions to a separate directory - Store multi-adaptive solution binary on disk with cache 0.4.1 ----- - First version of multi-adaptive solver working - Clean up file formats - Start changing from int to unsigned int where necessary - Fix bool->int when using stdarg in Parameter - Add NewArray and NewList (will replace Array and List) 0.4.0 ----- - Initiation of the FEniCS project - Change syntax of mesh files: grid -> mesh - Create symbolic links instead of copying files - Tanganyika -> ODE - Add Heat module - Grid -> Mesh - Move forms and mappings to separate libraries - Fix missing include of DirectSolver.h 0.3.12 ------ - Adaptive grid refinement (!) 
- Add User Manual - Add function dolfin_log() to turn logging on/off - Change from pointers to references for Node, Cell, Edge, Face - Update writexml.m - Add new grid files and rename old grid files 0.3.11 ------ - Add configure option --disable-curses - Grid refinement updates - Make OpenDX file format work for grids (output) - Add volume() and diameter() in cell - New classes TriGridRefinement and TetGridRefinement - Add iterators for faces and edges on a boundary - New class GridHierarchy 0.3.10 ------ - Use new boundary structure in Galerkin - Make dolfin_start() and dolfin_end() work - Make dolfin_assert() raise segmentation fault for plain text mode - Add configure option --enable-debug - Use autoreconf instead of scripts/preconfigure - Rename configure.in -> configure.ac - New class FaceIterator - New class Face - Move computation of boundary from GridInit to BoundaryInit - New class BoundaryData - New class BoundaryInit - New class Boundary - Make InitGrid compute edges - Add test program for generic matrix in src/demo/la - Clean up Grid classes - Add new class GridRefinementData - Move data from Cell to GenericCell - Make GMRES work with user defined matrix, only mult() needed - GMRES now uses only one function to compute residual() - Change Matrix structure (a modified envelope/letter) - Update script checkerror.m for Poisson - Add function dolfin_info_aptr() - Add cast to element pointer for iterators - Clean up and improve the Tensor class - New class: List - Name change: List -> Table - Name change: ShortList -> Array - Make functions in GridRefinement static - Make functions in GridInit static - Fix bug in GridInit (eriksv) - Add output to OpenDX format for 3D grids - Clean up ShortList class - Clean up List class - New class ODE, Equation replaced by PDE - Add Lorenz test problem - Add new problem type for ODEs - Add new module ode - Work on multi-adaptive ODE solver (lots of new stuff) - Work on grid refinement - Write all macros in LoggerMacros in 
one line - Add transpose functions to Matrix (Erik) 0.3.9 ----- - Update Krylov solver (Erik, Johan) - Add new LU factorization and LU solve (Niklas) - Add benchmark test in src/demo/bench - Add silent logger 0.3.8 ----- - Make sure dolfin-config is regenerated every time - Add demo program for cG(q) and dG(q) - Add dG(q) precalc of nodal points and weights - Add cG(q) precalc of nodal points and weights - Fix a bug in configure.in (AC_INIT with README) - Add Lagrange polynomials - Add multiplication with transpose - Add scalar products with rows and columns - Add A[i][j] index operator for quick access to dense matrix 0.3.7 ----- - Add new Matlab-like syntax like A(i,all) = x or A(3,all) = A(4,all) - Add dolfin_assert() macro enabled if debug is defined - Redesign of Matrix/DenseMatrix/SparseMatrix to use Matrix as common interface - Include missing cmath in Legendre.cpp and GaussianQuadrature.cpp 0.3.6 ----- - Add output functionality in DenseMatrix - Add high precision solver to DirectSolver - Clean up error messages in Matrix - Make solvers directly accessible through Matrix and DenseMatrix - Add quadrature (Gauss, Radau, and Lobatto) from Tanganyika - Start merge with Tanganyika - Add support for automatic documentation using doxygen - Update configure scripts - Add greeting at end of compilation 0.3.5 ----- - Define version number only in the file configure.in - Fix compilation problem (missing depcomp) 0.3.4 ----- - Fix bugs in some of the ElementFunction operators - Make convection-diffusion solver work again - Fix bug in integration, move multiplication with the determinant - Fix memory leaks in ElementFunction - Add parameter to choose output format - Make OctaveFile and MatlabFile subclasses of MFile - Add classes ScalarExpressionFunction and VectorExpressionFunction - Make progress bars work cleaner - Get ctrl-c in curses logger - Remove Settings-classes and use dolfin_parameter() - Redesign settings to match the structure of the log system - Add vector 
functions: Function::Vector - Add vector element functions: ElementFunction::Vector 0.3.3 ----- - Increased functionality of curses-based interface - Add progress bars to log system 0.3.2 ----- - More work on grid refinement - Add new curses based log system 0.3.1 ----- - Makefile updates: make install should now work properly - KrylovSolver updates - Preparation for grid refinement - Matrix and Vector updates 0.3.0 ----- - Make poisson work again, other modules still not working - Add output format for octave - Fix code to compile with g++-3.2 -Wall -Werror - New operators for Matrix - New and faster GMRES solver (speedup factor 4) - Changed name from SparseMatrix to Matrix - Remove old unused code - Add subdirectory math containing mathematical functions - Better access for A(i,j) += to improve speed in assembling - Add benchmark for linear algebra - New definition of finite element - Add algebra for function spaces - Convert grids in data/grids to xml.gz - Add iterators for Nodes and Cells - Change from .hh to .h - Add operators to Vector class (foufas) - Add dependence on libxml2 - Change from .C to .cpp to make Jim happy. 
- Change input/output functionality to streams - Change to new data structure for Grid - Change to object-oriented API at top level - Add use of C++ namespaces - Complete and major restructuring of the code - Fix compilation error in src/config - Fix name of keyword for convection-diffusion 0.2.11-1 -------- - Fix compilation error (`source`) on Solaris 0.2.11 ------ - Automate build process to simplify addition of new modules - Fix bug in matlab_write_field() (walter) - Fix bug in SparseMatrix::GetCopy() (foufas) 0.2.10-1 -------- - Fix compilation errors on RedHat (thsv) 0.2.10 ------ - Fix compilation of problems to use correct compiler - Change default test problems to the ones in the report - Improve memory management using mpatrol for tracking allocations - Change bool to int for va_arg, seems to be a problem with gcc > 3.0 - Improve input / output support: GiD, Matlab, OpenDX 0.2.8 ----- - Navier-Stokes starting to work again - Add Navier-Stokes 2d - Bug fixes 0.2.7 ----- - Add support for 2D problems - Add module convection-diffusion - Add local/global fields in equation/problem - Bug fixes - Navier-Stokes updates (still broken) 0.2.6 [2002-02-19] ------------------ - Navier-Stokes updates (still broken) - Output to matlab format 0.2.5 ----- - Add variational formulation with overloaded operators for systems - ShapeFunction/LocalField/FiniteElement according to Scott & Brenner 0.2.4 ----- - Add boundary conditions - Poisson seems to work ok 0.2.3 ----- - Add GMRES solver - Add CG solver - Add direct solver - Add Poisson solver - Big changes to the organisation of the source tree - Add kwdist.sh script - Bug fixes 0.2.2: ------ - Remove curses temporarily 0.2.1: ------ - Remove all PETSc stuff. Finally! 
- Gauss-Seidel cannot handle the pressure equation 0.2.0: ------ - First GPL release - Remove all of Klas Samuelssons proprietary grid code - Adaptivity and refinement broken, include in next release fenics-dolfinx-0.9.0/README.md000066400000000000000000000157751470520666000157340ustar00rootroot00000000000000# DOLFINx [![DOLFINx CI](https://github.com/FEniCS/dolfinx/actions/workflows/ccpp.yml/badge.svg)](https://github.com/FEniCS/dolfinx/actions/workflows/ccpp.yml) [![Actions Docker images](https://github.com/FEniCS/dolfinx/actions/workflows/docker-end-user.yml/badge.svg)](https://github.com/FEniCS/dolfinx/actions/workflows/docker-end-user.yml) [![Actions Spack build](https://github.com/FEniCS/dolfinx/actions/workflows/spack.yml/badge.svg)](https://github.com/FEniCS/dolfinx/actions/workflows/spack.yml) [![Actions Conda install](https://github.com/FEniCS/dolfinx/actions/workflows/conda.yml/badge.svg)](https://github.com/FEniCS/dolfinx/actions/workflows/conda.yml) [![Actions macOS/Homebrew install](https://github.com/FEniCS/dolfinx/actions/workflows/macos.yml/badge.svg)](https://github.com/FEniCS/dolfinx/actions/workflows/macos.yml) [![Actions Windows/vcpkg install](https://github.com/FEniCS/dolfinx/actions/workflows/windows.yml/badge.svg)](https://github.com/FEniCS/dolfinx/actions/workflows/windows.yml) DOLFINx is the computational environment of [FEniCSx](https://fenicsproject.org) and implements the FEniCS Problem Solving Environment in C++ and Python. DOLFINx is a new version of DOLFIN and is actively developed. ## Documentation Documentation can be viewed at . ## Installation ### From source #### C++ core To build and install the C++ core, in the `cpp/` directory, run: ```shell mkdir build cd build cmake .. make install ``` #### Python interface To install the Python interface, first install the C++ core, and then in the `python/` directory run: ```shell pip install -r build-requirements.txt pip install --check-build-dependencies --no-build-isolation . 
``` For detailed instructions, see . ### Spack Spack is recommended for building DOLFINx on HPC systems. To build the most recent release using [Spack](https://spack.readthedocs.io/) (assuming a bash-compatible shell): ```shell git clone https://github.com/spack/spack.git . ./spack/share/spack/setup-env.sh spack env create fenicsx-env spack env activate fenicsx-env spack add fenics-dolfinx+adios2 py-fenics-dolfinx cflags="-O3" fflags="-O3" spack install ``` See the Spack [documentation](https://spack.readthedocs.io/) for comprehensive instructions. ### Binary **Recommendations** - macOS: [conda](#conda). - Linux: [apt](#ubuntu-packages) ([Ubuntu](#ubuntu-packages)/[Debian](#debian-packages)), [docker](#docker-images) or [conda](#conda). See also [Spack](#spack). - Windows: [docker](#docker-images), or install [WSL2](https://docs.microsoft.com/en-us/windows/wsl/install) and use [Ubuntu](#ubuntu-packages). [conda](#conda) packages in beta testing. - High performance computers: [Spack](#spack) or [from source](#from-source), both using system-provided MPI. #### conda To install the latest release of the Python interface, with pyvista support for visualisation, using [conda](https://conda.io): ```shell conda create -n fenicsx-env conda activate fenicsx-env conda install -c conda-forge fenics-dolfinx mpich pyvista ``` Windows conda packages are currently in beta testing and can be installed using: ```shell conda create -n fenicsx-env conda activate fenicsx-env conda install -c minrk/label/fenics-windows -c conda-forge fenics-dolfinx=0.9.0.dev ``` Because FEniCS uses just-in-time compilation it also necessary to install [Microsoft Visual Studio](https://visualstudio.microsoft.com/downloads/). conda is distributed with [Anaconda](https://www.anaconda.com/) and [Miniconda](https://docs.conda.io/en/latest/miniconda.html). The recipe is hosted on [conda-forge](https://github.com/conda-forge/fenics-dolfinx-feedstock). 
| Name | Downloads | Version | Platforms | | --- | --- | --- | --- | | [![Conda Recipe](https://img.shields.io/badge/recipe-fenics--dolfinx-green.svg)](https://anaconda.org/conda-forge/fenics-dolfinx) | [![Conda Downloads](https://img.shields.io/conda/dn/conda-forge/fenics-dolfinx.svg)](https://anaconda.org/conda-forge/fenics-dolfinx) | [![Conda Version](https://img.shields.io/conda/vn/conda-forge/fenics-dolfinx.svg)](https://anaconda.org/conda-forge/fenics-dolfinx) | [![Conda Platforms](https://img.shields.io/conda/pn/conda-forge/fenics-dolfinx.svg)](https://anaconda.org/conda-forge/fenics-dolfinx) | #### Ubuntu packages The [Ubuntu PPA](https://launchpad.net/~fenics-packages/+archive/ubuntu/fenics) provides FEniCSx packages. To install: ```shell add-apt-repository ppa:fenics-packages/fenics apt update apt install fenicsx ``` When a version of DOLFINx is released we aim to provide a package for the most recent LTS version of Ubuntu. All other versions are provided on a best-effort basis. #### Debian packages [DOLFINx](https://tracker.debian.org/pkg/fenics-dolfinx) is included with [various versions](https://packages.debian.org/search?keywords=python3-dolfinx&searchon=names&exact=1&suite=all§ion=all) of Debian. Install with `apt-get install fenicsx`. #### Docker images To run a Docker image with the latest release of DOLFINx: ```shell docker run -ti dolfinx/dolfinx:stable ``` To switch between real and complex builds of DOLFINx/PETSc. 
```shell source /usr/local/bin/dolfinx-complex-mode source /usr/local/bin/dolfinx-real-mode ``` A Jupyter Lab environment with the latest release of DOLFINx: ```shell docker run --init -ti -p 8888:8888 dolfinx/lab:stable # Access at http://localhost:8888 ``` A Docker image with DOLFINx built nightly: ```shell docker run -ti dolfinx/dolfinx:nightly ``` A development image with all of the dependencies required to build the latest release of the FEniCSx components: ```shell docker run -ti dolfinx/dev-env:stable ``` A development image with all of the dependencies required to build the `main` branch of the FEniCSx components: ```shell docker run -ti dolfinx/dev-env:current ``` The Docker images support arm64 and amd64 architectures. For a full list of tags, including versioned images, see ## Contributing Information about how to contribute to DOLFINx can be found [here](CONTRIBUTING.md). ## License DOLFINx is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. DOLFINx is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with DOLFINx. If not, see . ## Contact For questions about using DOLFINx, visit the FEniCS Discourse page: or use the FEniCS Slack channel: (use to sign up) For bug reports visit: fenics-dolfinx-0.9.0/RELEASE.md000066400000000000000000000203651470520666000160460ustar00rootroot00000000000000# FEniCSx release guide ## Prerequisites Check out all of the FEniCSx components on the `release` branch. Check that all CIs on `main` are running green. Check that the `main` documentation looks reasonable at https://docs.fenicsproject.org. 
The release proceeds in a bottom up manner (UFL, Basix, FFCx, DOLFINx). pypa packages cannot be deleted and should be made a number of days after the creation of git tags so that errors can be fixed. GitHub releases can have their version notes updated, and can be deleted and remade on new tags (not recommended). The release process consists of the following steps: 1. Update version numbers and dependencies on the `release` branches. 2. Run integration tests, ensuring that the `release` branches work together. 3. Make git tags on the tip of `release`. 4. Organise production of release artifacts. 5. Update version numbers on `main`. 6. Make GitHub releases (not permanent) 7. pypa releases (permanent!). ## Version bumping At the current phase of development (<1.0) FEniCSx components are typically bumped an entire minor version i.e. `0.+1.0`. UFL still runs on the year-based release scheme. ### UFL version bump 1. Merge `origin/main` into `release` resolving all conflicts in favour of `main`. git pull git checkout release git merge --no-commit origin/main git checkout --theirs origin/main . # files deleted on `main` must be manually git `add`ed git diff origin/main 2. Update the version number in `pyproject.toml`, e.g. `2022.2.0`. 3. Commit and push. 4. Check `git diff origin/main` for obvious errors. ### Basix version bump 1. Merge `origin/main` into `release` resolving all conflicts in favour of `main`. git pull git checkout release git merge --no-commit origin/main git checkout --theirs origin/main . # files deleted on `main` must be manually `git add`ed git diff origin/main 2. Update version numbers in `pyproject.toml`, `python/pyproject.toml`, `CMakeLists.txt` and `cpp/CMakeLists.txt`. 4. In `pyproject.toml` update the `fenics-ufl` optional dependency version. On `main` this is often pointing at the git repo, it needs to be changed to a version bound e.g. `>=2024.1.0,<2024.2.0`. 5. Commit and push. 6. Check `git diff origin/main` for obvious errors. 
### FFCx version bump 1. Merge `main` into `release` resolving all conflicts in favour of `main`. git pull git checkout release git merge --no-commit origin/main git checkout --theirs origin/main . # files deleted on `main` must be manually git `add`ed git diff origin/main 2. Update the version number in `pyproject.toml`, e.g. `0.5.0`. 3. Update the dependency versions for `fenics-basix` and `fenics-ufl` in `pyproject.toml`. 4. If necessary, update the version number in `cmake/CMakeLists.txt`, e.g. `0.5.0`. 5. Update the version number macros in `ffcx/codegeneration/ufcx.h`. Typically this should match the Python version number. Remember to change the `UFCX_VERSION_RELEASE` to `1`. 6. Commit and push. 7. Check `git diff origin/main` for obvious errors. ### DOLFINx 1. Merge `main` into `release` resolving all conflicts in favour of `main`. git pull git checkout release git merge --no-commit origin/main git checkout --theirs origin/main . # files deleted on `main` must be manually git `add`ed git diff origin/main 2. In `cpp/CMakeLists.txt` change the version number e.g. `0.5.0`. 3. In `cpp/CMakeLists.txt` change the version number in the `find_package(ufcx)` and `find_package(UFCx)` calls. 4. In `python/pyproject.toml` update the version to e.g. `0.5.0` and update the dependency versions for `fenics-ffcx` and `fenics-ufl`. 5. In `CITATION.md` update the version number `version: 0.5.0` and the release date `date-released: 2022-03-14`. 6. In `.github/ISSUE_TEMPLATE/bug_report.yml` add a new option to the version numbers. 7. Commit and push. 8. Check `git diff origin/main` for obvious errors. ## Integration testing Although lengthy, integration testing is highly effective at discovering issues and mistakes before they reach tagged versions. At each of the following links run the GitHub Action Workflow manually using the `release` branch in all fields, including the . 
*Only proceed to tagging once all tests pass.* Basix with FFCx: https://github.com/FEniCS/basix/actions/workflows/ffcx-tests.yml Basix with DOLFINx: https://github.com/FEniCS/basix/actions/workflows/dolfinx-tests.yml UFL with FEniCSx: https://github.com/FEniCS/ufl/actions/workflows/fenicsx-tests.yml FFCx with DOLFINx: https://github.com/FEniCS/ffcx/actions/workflows/dolfinx-tests.yml Full stack: https://github.com/FEniCS/dolfinx/actions/workflows/ccpp.yml ## Tagging Make appropriate version tags in each repository. UFL does not use the `v` prefix. git tag v0.5.0 git push --tags origin ## Artifacts ### Documentation Documentation should be pushed automatically to `FEniCS/docs` on the creation of tags. You will need to manually update the `README.md`. ### Docker containers First create tagged development and test environment images, e.g. `v0.5.0`: https://github.com/FEniCS/dolfinx/actions/workflows/docker-dev-test-env.yml Then create tagged end-user images setting the base image as the tagged development image: https://github.com/FEniCS/dolfinx/actions/workflows/docker-end-user.yml The tag prefix should be the same as the DOLFINx tag e.g. `v0.5.0`. Git refs should be appropriate tags for each component. Tagged Docker images will be pushed to Dockerhub and GitHub. docker run -ti dolfinx/dolfinx:v0.5.0 Use the *Docker update stable* tag workflow to update/link `:stable` to e.g. `v0.5.0`. https://github.com/FEniCS/dolfinx/actions/workflows/docker-update-stable.yml ### pypi Wheels can be made using the following actions: https://github.com/FEniCS/basix/actions/workflows/build-wheels.yml https://github.com/FEniCS/ufl/actions/workflows/build-wheels.yml https://github.com/FEniCS/ffcx/actions/workflows/build-wheels.yml Both the workflow and the ref should be set to the appropriate tags for each component. It is recommended to first build without publishing, then to test pypi, then to the real pypi. Publishing to pypa cannot be revoked. 
The DOLFINx wheel builder is experimental and is not used in the release process at this time. ### Mistakes If something doesn't work, or other issues/bugs are identified during the release process you can either: 1. Make changes on `main` via the usual PR workflow, then `git cherry-pick` or `git merge` the commit back onto `release`. 2. Manually make commits on the `release` branch. If you want the same change to be reflected on `main` option 1. is preferred. If a mistake is noticed soon after making a tag then you can delete the tag and recreate it. It is also possible to recreate GitHub releases. However, if the mistake was noticed after pypi packages are pushed you must create `*.post0` tags or make a minor version bumps , as pypa is immutable. ### GitHub releases Releases can be made at the following links using the appropriate tag. The automatic release notes should be checked. The release notes can still be edited after the release is finalised. https://github.com/FEniCS/basix/releases/new https://github.com/FEniCS/ufl/releases/new https://github.com/FEniCS/ffcx/releases/new https://github.com/FEniCS/dolfinx/releases/new ## Post-release Check for any changes on `release` that should be cherry-picked back onto `main` via a PR. git checkout main git diff release git log git cherry-pick 914ae4 Bump the version numbers on the `main` branches of UFL, Basix, FFCx, DOLFINx following the instructions above. ### Bug fix patches Bug fix versions e.g. `v0.5.1` can be made by cherry picking commits off of `main` and bumping the minor version number. Remember to run the DOLFINx integration tests on a proposed set of tags as it is easy to make an error. ### Debian/Ubuntu Contact Drew Parsons. ### Conda Forge Conda Forge bots typically pickup new releases automatically. Can also contact @minrk. ### Spack Update the Spack recipe for the FEniCSx components on the fork [FEniCS/spack](https://github.com/FEniCS/spack) using a branch e.g. `updates/dolfinx-`. 
Create a pull request to the Spack mainline repository. fenics-dolfinx-0.9.0/cpp/000077500000000000000000000000001470520666000152205ustar00rootroot00000000000000fenics-dolfinx-0.9.0/cpp/.vcpkg-overlay/000077500000000000000000000000001470520666000200675ustar00rootroot00000000000000fenics-dolfinx-0.9.0/cpp/.vcpkg-overlay/README.md000066400000000000000000000012121470520666000213420ustar00rootroot00000000000000# vcpkg overlay port for Intel MPI This vcpkg overlay port contains scripts for installing Intel MPI on Windows (only). MSMPI, which is used by default with vcpkg, does not support the MPI3 standard. Using this port requires that Intel OneAPI binaries are already installed. On Unix systems the built-in OpenMPI or MPICH ports can be used. From the root of this repository it can be activated by e.g.: cmake -DCMAKE_TOOLCHAIN_FILE=%VCPKG_ROOT%/scripts/buildsystems/vcpkg.cmake -DVCPKG_OVERLAY_PORTS="cpp/.vcpkg-overlay" -B build-dir -S cpp/ This overlay port was adapted from the original at: https://github.com/arcaneframework/framework-ci fenics-dolfinx-0.9.0/cpp/.vcpkg-overlay/intel-mpi/000077500000000000000000000000001470520666000217655ustar00rootroot00000000000000fenics-dolfinx-0.9.0/cpp/.vcpkg-overlay/intel-mpi/mpi-wrapper.cmake000066400000000000000000000021711470520666000252330ustar00rootroot00000000000000get_filename_component(_IMPORT_PREFIX "${CMAKE_CURRENT_LIST_FILE}" PATH) get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) # Needed to find 'mpiexec' set(ENV{I_MPI_ROOT} "${_IMPORT_PREFIX}/tools/intel-mpi") set(MPI_C_ADDITIONAL_INCLUDE_DIRS "${_IMPORT_PREFIX}/include" CACHE STRING "MPI C additional include directories" FORCE ) set(MPI_CXX_ADDITIONAL_INCLUDE_DIRS "${_IMPORT_PREFIX}/include" CACHE STRING "MPI CXX additional include directories" FORCE ) set(MPI_C_LIB_NAMES "IMPI" CACHE STRING "MPI C lib name" FORCE ) set(MPI_CXX_LIB_NAMES "IMPI" CACHE STRING "MPI CXX lib name" FORCE ) 
set(MPI_IMPI_LIBRARY "${_IMPORT_PREFIX}/lib/impi.lib" CACHE STRING "MPI C/CXX libraries" FORCE ) set(MPI_ASSUME_NO_BUILTIN_MPI TRUE CACHE BOOL "" FORCE ) set(MPI_C_COMPILER "${_IMPORT_PREFIX}/tools/intel-mpi/mpicc.bat" CACHE STRING "MPI C Compiler" FORCE ) set(MPI_CXX_COMPILER "${_IMPORT_PREFIX}/tools/intel-mpi/mpicxx.bat" CACHE STRING "MPI C Compiler" FORCE ) unset(_IMPORT_PREFIX) _find_package(${ARGS}) fenics-dolfinx-0.9.0/cpp/.vcpkg-overlay/intel-mpi/portfile.cmake000066400000000000000000000051471470520666000246220ustar00rootroot00000000000000set(INTELMPI_VERSION "2021.12") set(SOURCE_PATH "${CURRENT_BUILDTREES_DIR}/src/intel-mpi-${INTELMPI_VERSION}") cmake_path(SET SDK_SOURCE_DIR "C:/Program Files (x86)/Intel/oneAPI") message(STATUS "Using Intel MPI source SDK at ${SDK_SOURCE_DIR}") set(SDK_SOURCE_MPI_DIR "${SDK_SOURCE_DIR}/mpi/${INTELMPI_VERSION}") set(SOURCE_INCLUDE_PATH "${SDK_SOURCE_MPI_DIR}/include") set(SOURCE_LIB_PATH "${SDK_SOURCE_MPI_DIR}/lib") set(SOURCE_DEBUG_LIB_PATH "${SDK_SOURCE_MPI_DIR}/lib/mpi/debug") set(SOURCE_BIN_PATH "${SDK_SOURCE_MPI_DIR}/bin") set(SOURCE_DEBUG_BIN_PATH "${SDK_SOURCE_MPI_DIR}/bin/mpi/debug") set(SOURCE_TOOLS_PATH "${SDK_SOURCE_MPI_DIR}/bin") set(SOURCE_LIBFABRIC_PATH "${SDK_SOURCE_MPI_DIR}/opt/mpi/libfabric/bin") # Get files in include directory file( GLOB_RECURSE SOURCE_INCLUDE_FILES LIST_DIRECTORIES TRUE "${SOURCE_INCLUDE_PATH}/*" ) # Get files in bin directory file(GLOB TOOLS_FILES "${SOURCE_TOOLS_PATH}/*.exe" "${SOURCE_TOOLS_PATH}/*.dll" "${SOURCE_TOOLS_PATH}/*.bat" ) # Install tools files file(INSTALL ${TOOLS_FILES} DESTINATION "${CURRENT_PACKAGES_DIR}/tools/${PORT}") # Also install include files in the tools directory because the compiler # wrappers (mpicc.bat for example) needs them file(INSTALL ${SOURCE_INCLUDE_FILES} DESTINATION "${CURRENT_PACKAGES_DIR}/tools/${PORT}/include" ) # Install include files file(INSTALL ${SOURCE_INCLUDE_FILES} DESTINATION "${CURRENT_PACKAGES_DIR}/include" ) # Install release library 
files file(INSTALL "${SOURCE_LIB_PATH}/impi.lib" "${SOURCE_LIB_PATH}/impicxx.lib" DESTINATION "${CURRENT_PACKAGES_DIR}/lib" ) # Install debug library files file(INSTALL "${SOURCE_DEBUG_LIB_PATH}/impi.lib" "${SOURCE_DEBUG_LIB_PATH}/impicxx.lib" DESTINATION "${CURRENT_PACKAGES_DIR}/debug/lib" ) # 'libfabric.dll' is not needed for the compilation but it is needed for the # runtime and should be in the PATH for 'mpiexec' to work file(INSTALL "${SOURCE_LIBFABRIC_PATH}/libfabric.dll" "${SOURCE_BIN_PATH}/impi.dll" "${SOURCE_BIN_PATH}/impi.pdb" DESTINATION "${CURRENT_PACKAGES_DIR}/bin" ) file(INSTALL "${SOURCE_LIBFABRIC_PATH}/libfabric.dll" "${SOURCE_DEBUG_BIN_PATH}/impi.dll" "${SOURCE_DEBUG_BIN_PATH}/impi.pdb" DESTINATION "${CURRENT_PACKAGES_DIR}/debug/bin" ) file(INSTALL "${CMAKE_CURRENT_LIST_DIR}/mpi-wrapper.cmake" DESTINATION "${CURRENT_PACKAGES_DIR}/share/${PORT}" ) # Handle copyright file( COPY "${SDK_SOURCE_DIR}/licensing/2024.1/licensing/2024.1/license.htm" DESTINATION "${CURRENT_PACKAGES_DIR}/share/${PORT}" ) file(WRITE "${CURRENT_PACKAGES_DIR}/share/${PORT}/copyright" "See the licence.htm file in this directory." 
) fenics-dolfinx-0.9.0/cpp/.vcpkg-overlay/intel-mpi/vcpkg.json000066400000000000000000000005361470520666000237760ustar00rootroot00000000000000{ "name": "intel-mpi", "version": "2021.12.1", "port-version": 2, "description": "Intel MPI is a Intel implementation of the Message Passing Interface standard for developing and running parallel applications.", "homepage": "https://www.intel.com/content/www/us/en/developer/tools/oneapi/mpi-library.html", "supports": "windows & !uwp" } fenics-dolfinx-0.9.0/cpp/.vcpkg-overlay/mpi/000077500000000000000000000000001470520666000206545ustar00rootroot00000000000000fenics-dolfinx-0.9.0/cpp/.vcpkg-overlay/mpi/portfile.cmake000066400000000000000000000003771470520666000235110ustar00rootroot00000000000000set(VCPKG_POLICY_EMPTY_PACKAGE enabled) if(VCPKG_TARGET_IS_WINDOWS) file( INSTALL "${CURRENT_INSTALLED_DIR}/share/intel-mpi/mpi-wrapper.cmake" DESTINATION "${CURRENT_PACKAGES_DIR}/share/${PORT}" RENAME vcpkg-cmake-wrapper.cmake ) endif() fenics-dolfinx-0.9.0/cpp/.vcpkg-overlay/mpi/vcpkg.json000066400000000000000000000014401470520666000226600ustar00rootroot00000000000000{ "name": "mpi", "version-string": "1", "port-version": 3, "description": "Message Passing Interface (MPI) is a standardized and portable message-passing standard designed by a group of researchers from academia and industry to function on a wide variety of parallel computing architectures. The standard defines the syntax and semantics of a core of library routines useful to a wide range of users writing portable message-passing programs in C, C++, and Fortran. 
There are several well-tested and efficient implementations of MPI, many of which are open-source or in the public domain.", "license": null, "supports": "!uwp", "dependencies": [ { "name": "intel-mpi", "platform": "windows" }, { "name": "openmpi", "platform": "!windows" } ] } fenics-dolfinx-0.9.0/cpp/CMakeLists.txt000066400000000000000000000374241470520666000177720ustar00rootroot00000000000000# ------------------------------------------------------------------------------ # Top level CMakeLists.txt file for DOLFINx cmake_minimum_required(VERSION 3.21) if(POLICY CMP0167) cmake_policy(SET CMP0167 NEW) # Boost CONFIG mode endif() # ------------------------------------------------------------------------------ # Set project name and version number project(DOLFINX VERSION "0.9.0") set(DOXYGEN_DOLFINX_VERSION ${DOLFINX_VERSION} CACHE STRING "Version for Doxygen" FORCE ) # ------------------------------------------------------------------------------ if(WIN32) # Windows requires all symbols to be manually exported. This flag exports all # symbols automatically, as in Unix. 
set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS TRUE) endif() # ------------------------------------------------------------------------------ # Get GIT changeset, if available find_program(GIT_FOUND git) if(GIT_FOUND) # Get the commit hash of the working branch execute_process( COMMAND git rev-parse HEAD WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE GIT_COMMIT_HASH OUTPUT_STRIP_TRAILING_WHITESPACE ) else() set(GIT_COMMIT_HASH "unknown") endif() # ------------------------------------------------------------------------------ # General configuration # Set location of our FindFoo.cmake modules set(CMAKE_MODULE_PATH "${DOLFINX_SOURCE_DIR}/cmake/modules") # Make sure CMake uses the correct DOLFINConfig.cmake for tests and demos set(CMAKE_PREFIX_PATH ${CMAKE_PREFIX_PATH} ${CMAKE_CURRENT_BINARY_DIR}/dolfinx) # ------------------------------------------------------------------------------ # Configurable options for how we want to build include(FeatureSummary) option(BUILD_SHARED_LIBS "Build DOLFINx with shared libraries." ON) add_feature_info( BUILD_SHARED_LIBS BUILD_SHARED_LIBS "Build DOLFINx with shared libraries." ) # If libdolfinx links to a symbol contained in an external dll, that dll will be # installed alongside libdolfinx. This excludes system dlls included with # Windows. option(INSTALL_RUNTIME_DEPENDENCIES "Include runtime dependencies in install (Windows-only)" OFF ) add_feature_info( INSTALL_RUNTIME_DEPENDENCIES INSTALL_RUNTIME_DEPENDENCIES "Include runtime dependencies in install (Windows-only)" ) option(DOLFINX_SKIP_BUILD_TESTS "Skip build tests for testing usability of dependency packages." OFF ) add_feature_info( DOLFINX_SKIP_BUILD_TESTS DOLFINX_SKIP_BUILD_TESTS "Skip build tests for testing usability of dependency packages." ) # Add shared library paths so shared libs in non-system paths are found option(CMAKE_INSTALL_RPATH_USE_LINK_PATH "Add paths to linker search and installed rpath." 
ON ) add_feature_info( CMAKE_INSTALL_RPATH_USE_LINK_PATH CMAKE_INSTALL_RPATH_USE_LINK_PATH "Add paths to linker search and installed rpath." ) # Control Basix discovery option( DOLFINX_BASIX_PYTHON "Ask Python basix module for hint where to find Basix C++ install using CONFIG mode." ON ) add_feature_info( DOLFINX_BASIX_PYTHON DOLFINX_BASIX_PYTHON "Ask Python basix module for hint where to find Basix C++ install using CONFIG mode." ) # Control UFCx discovery option( DOLFINX_UFCX_PYTHON "Ask Python FFCx module where to find ufcx.h header using MODULE mode. Otherwise use CONFIG mode." ON ) add_feature_info( DOLFINX_UFCX_PYTHON DOLFINX_UFCX_PYTHON "Ask Python FFCx module where to find ufcx.h header using MODULE mode. Otherwise use CONFIG mode." ) # ------------------------------------------------------------------------------ # Enable or disable optional packages if(DOLFINX_ENABLE_ADIOS2) set(_REQUIRE_ADIOS2 TRUE CACHE BOOL "Is ADIOS2 REQUIRED?" ) else() set(_REQUIRE_ADIOS2 FALSE CACHE BOOL "Is ADIOS2 REQUIRED?" ) endif() option(DOLFINX_ENABLE_ADIOS2 "Compile with support for ADIOS2." ON) set_package_properties( ADIOS2 PROPERTIES TYPE OPTIONAL DESCRIPTION "Adaptable Input/Output (I/O) System." URL "https://adios2.readthedocs.io/en/latest/" PURPOSE "IO, including in parallel" ) if(DOLFINX_ENABLE_PETSC) set(_REQUIRE_PETSC TRUE CACHE BOOL "Is PETSc REQUIRED?" ) else() set(_REQUIRE_PETSC FALSE CACHE BOOL "Is PETSc REQUIRED?" ) endif() option(DOLFINX_ENABLE_PETSC "Compile with support for PETSc." ON) set_package_properties( PETSc PROPERTIES TYPE RECOMMENDED DESCRIPTION "Portable, Extensible Toolkit for Scientific Computation" URL "https://petsc.org/" PURPOSE "Linear and nonlinear solvers" ) if(DOLFINX_ENABLE_PARMETIS) set(_REQUIRE_PARMETIS TRUE CACHE BOOL "Is Parmetis REQUIRED?" ) else() set(_REQUIRE_PARMETIS FALSE CACHE BOOL "Is Parmetis REQUIRED?" ) endif() option(DOLFINX_ENABLE_PARMETIS "Compile with support for ParMETIS." 
ON) set_package_properties( ParMETIS PROPERTIES TYPE RECOMMENDED DESCRIPTION "Parallel Graph Partitioning and Fill-reducing Matrix Ordering" URL "http://glaros.dtc.umn.edu/gkhome/metis/parmetis/overview" PURPOSE "Parallel graph partitioning" ) if(DOLFINX_ENABLE_SCOTCH) set(_REQUIRE_SCOTCH TRUE CACHE BOOL "Is SCOTCH REQUIRED?" ) else() set(_REQUIRE_SCOTCH FALSE CACHE BOOL "Is SCOTCH REQUIRED?" ) endif() option(DOLFINX_ENABLE_SCOTCH "Compile with support for SCOTCH." ON) set_package_properties( SCOTCH PROPERTIES TYPE OPTIONAL DESCRIPTION "Programs and libraries for graph, mesh and hypergraph partitioning" URL "https://www.labri.fr/perso/pelegrin/scotch" PURPOSE "Parallel graph partitioning" ) if(DOLFINX_ENABLE_SLEPC) set(_REQUIRE_SLEPC TRUE CACHE BOOL "Is SLEPc REQUIRED?" ) else() set(_REQUIRE_SLEPC FALSE CACHE BOOL "Is SLEPc REQUIRED?" ) endif() option(DOLFINX_ENABLE_SLEPC "Compile with support for SLEPc." ON) set_package_properties( SLEPc PROPERTIES TYPE RECOMMENDED DESCRIPTION "Scalable Library for Eigenvalue Problem Computations" URL "http://slepc.upv.es/" PURPOSE "Eigenvalue computation" ) if(DOLFINX_ENABLE_KAHIP) set(_REQUIRE_KAHIP TRUE CACHE BOOL "Is KaHIP REQUIRED?" ) else() set(_REQUIRE_KAHIP FALSE CACHE BOOL "Is KaHIP REQUIRED?" ) endif() option(DOLFINX_ENABLE_KAHIP "Compile with support for KaHIP." ON) set_package_properties( KaHIP PROPERTIES TYPE OPTIONAL DESCRIPTION "A family of graph partitioning programs" URL "https://kahip.github.io/" PURPOSE "Parallel graph partitioning" ) # ------------------------------------------------------------------------------ # Check for MPI find_package(MPI 3 REQUIRED) find_package(spdlog REQUIRED) # ------------------------------------------------------------------------------ # Compiler flags # Default build type (can be overridden by user) if(NOT CMAKE_BUILD_TYPE) set(CMAKE_BUILD_TYPE "RelWithDebInfo" CACHE STRING "Choose the type of build, options are: Debug Developer MinSizeRel Release RelWithDebInfo." 
FORCE ) endif() # Check for some compiler flags include(CheckCXXCompilerFlag) check_cxx_compiler_flag(-pipe HAVE_PIPE) if(HAVE_PIPE) list(APPEND DOLFINX_CXX_DEVELOPER_FLAGS -pipe) endif() # Add some strict compiler checks check_cxx_compiler_flag("-Wall -Werror -Wextra -pedantic" HAVE_PEDANTIC) if(HAVE_PEDANTIC) list(APPEND DOLFINX_CXX_DEVELOPER_FLAGS -Wall;-Werror;-Wextra;-pedantic) endif() # Debug flags check_cxx_compiler_flag(-g HAVE_DEBUG) if(HAVE_DEBUG) list(APPEND DOLFINX_CXX_DEVELOPER_FLAGS -g) endif() # Optimisation check_cxx_compiler_flag(-O2 HAVE_O2_OPTIMISATION) if(HAVE_O2_OPTIMISATION) list(APPEND DOLFINX_CXX_DEVELOPER_FLAGS -O2) endif() # Turn off some checks in gcc12 and gcc13 due to false positives with the fmt # library if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER "11.4" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS "14.0") list(APPEND DOLFINX_CXX_DEVELOPER_FLAGS -Wno-array-bounds;-Wno-stringop-overflow) endif() # ------------------------------------------------------------------------------ # Find required packages # pugixml find_package(pugixml REQUIRED) # Note: When updating Boost version, also update DOLFINXConfig.cmake.in if(DEFINED ENV{BOOST_ROOT} OR DEFINED BOOST_ROOT) set(Boost_NO_SYSTEM_PATHS on) endif() set(Boost_USE_MULTITHREADED $ENV{BOOST_USE_MULTITHREADED}) set(Boost_VERBOSE TRUE) find_package(Boost 1.70 REQUIRED timer) set_package_properties( Boost PROPERTIES TYPE REQUIRED DESCRIPTION "Boost C++ libraries" URL "http://www.boost.org" ) # Basix C++ files can be installed as a standalone C++ library, or in the Basix # Python module tree. # If requested (default), ask the Python interpreter for hints on where to find # Basix C++ library. 
if(DOLFINX_BASIX_PYTHON) find_package( Python3 COMPONENTS Interpreter QUIET ) if(Python3_Interpreter_FOUND) message(STATUS "Checking for Basix hints with ${Python3_EXECUTABLE}") execute_process( COMMAND ${Python3_EXECUTABLE} -c "import basix, os, sys; sys.stdout.write(os.path.dirname(basix.__file__))" OUTPUT_VARIABLE BASIX_PY_DIR RESULT_VARIABLE BASIX_PY_COMMAND_RESULT ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE ) endif() if(BASIX_PY_DIR) # Converts os native to cmake native path cmake_path(SET BASIX_PY_DIR "${BASIX_PY_DIR}") message(STATUS "Adding ${BASIX_PY_DIR} to Basix search hints") # Basix installed from manylinux wheel requires rpath set. if(IS_DIRECTORY ${BASIX_PY_DIR}/../fenics_basix.libs) set(CMAKE_INSTALL_RPATH ${BASIX_PY_DIR}/../fenics_basix.libs) endif() else() message(STATUS "No Basix hint was found.") endif() endif() find_package(Basix 0.8 REQUIRED CONFIG HINTS ${BASIX_PY_DIR}) set_package_properties( basix PROPERTIES TYPE REQUIRED DESCRIPTION "FEniCS tabulation library" URL "https://github.com/fenics/basix" ) # Check for HDF5 set(HDF5_PREFER_PARALLEL TRUE) set(HDF5_FIND_DEBUG TRUE) find_package(HDF5 REQUIRED COMPONENTS C) if(NOT HDF5_IS_PARALLEL) message( FATAL_ERROR "Found serial HDF5 build, MPI HDF5 build required, try setting HDF5_DIR or HDF5_ROOT" ) endif() set_package_properties( HDF5 PROPERTIES TYPE REQUIRED DESCRIPTION "Hierarchical Data Format 5 (HDF5)" URL "https://www.hdfgroup.org/HDF5" ) # Check for UFC Note: we use the case (ufcx vs UFCx) elsewhere to determine by # which method UFCx was found. if(NOT DOLFINX_UFCX_PYTHON) # Check in CONFIG mode, i.e. look for installed ufcxConfig.cmake find_package(ufcx 0.9 REQUIRED CONFIG) else() # Check in MODULE mode (using FindUFCX.cmake) using Python intepreter. 
find_package( Python3 COMPONENTS Interpreter REQUIRED ) find_package(UFCx 0.9 REQUIRED MODULE) endif() set_package_properties( UFCx PROPERTIES TYPE REQUIRED DESCRIPTION "Interface for form-compilers (part of FFCx)" URL "https://github.com/fenics/ffcx" ) # ------------------------------------------------------------------------------ # Find optional packages if(DOLFINX_ENABLE_ADIOS2 AND _REQUIRE_ADIOS2) find_package(ADIOS2 2.8.1 REQUIRED) elseif(DOLFINX_ENABLE_ADIOS2) find_package(ADIOS2 2.8.1) endif() if(ADIOS2_FOUND AND NOT ADIOS2_HAVE_MPI) message( FATAL_ERROR "Found serial ADIOS2 build, MPI ADIOS2 build required, try setting ADIOS2_DIR or ADIOS2_ROOT" ) endif() if(DOLFINX_ENABLE_PETSC) find_package(PkgConfig REQUIRED) set(ENV{PKG_CONFIG_PATH} "$ENV{PETSC_DIR}/$ENV{PETSC_ARCH}/lib/pkgconfig:$ENV{PETSC_DIR}/lib/pkgconfig:$ENV{PKG_CONFIG_PATH}" ) if(_REQUIRE_PETSC) pkg_search_module(PETSC REQUIRED IMPORTED_TARGET PETSc>=3.15 petsc>=3.15) else() pkg_search_module(PETSC OPTIONAL IMPORTED_TARGET PETSc>=3.15 petsc>=3.15) endif() # Setting for FeatureSummary if(PETSC_FOUND) message( STATUS "Found PETSc version ${PETSC_VERSION}, prefix: ${PETSC_PREFIX}" ) set_property(GLOBAL APPEND PROPERTY PACKAGES_FOUND PETSc) else() set_property(GLOBAL APPEND PROPERTY PACKAGES_NOT_FOUND PETSc) endif() endif() if(DOLFINX_ENABLE_SLEPC AND PETSC_FOUND) find_package(PkgConfig REQUIRED) set(ENV{PKG_CONFIG_PATH} "$ENV{SLEPC_DIR}/$ENV{PETSC_ARCH}/lib/pkgconfig:$ENV{SLEPC_DIR}/lib/pkgconfig:$ENV{PKG_CONFIG_PATH}" ) set(ENV{PKG_CONFIG_PATH} "$ENV{PETSC_DIR}/$ENV{PETSC_ARCH}/lib/pkgconfig:$ENV{PETSC_DIR}/lib/pkgconfig:$ENV{PKG_CONFIG_PATH}" ) set(ENV{PKG_CONFIG_PATH} "$ENV{PETSC_DIR}/$ENV{PETSC_ARCH}:$ENV{PETSC_DIR}:$ENV{PKG_CONFIG_PATH}" ) if(_REQUIRE_SLEPC) pkg_search_module(SLEPC REQUIRED IMPORTED_TARGET slepc>=3.15) else() pkg_search_module(SLEPC IMPORTED_TARGET slepc>=3.15) endif() # Setting for FeatureSummary if(SLEPC_FOUND) message( STATUS "Found SLEPc version ${SLEPC_VERSION}, prefix: 
${SLEPC_PREFIX}" ) set_property(GLOBAL APPEND PROPERTY PACKAGES_FOUND SLEPc) else() set_property(GLOBAL APPEND PROPERTY PACKAGES_NOT_FOUND SLEPc) endif() elseif(_REQUIRE_SLEPC AND NOT PETSC_FOUND) message( FATAL_ERROR "SLEPc requested, but no configured because PETSc was not found." ) endif() if(DOLFINX_ENABLE_SCOTCH) find_package(SCOTCH CONFIG) # Attempt to find in CONFIG mode if(NOT SCOTCH_FOUND) if(_REQUIRE_SCOTCH) # If not found in CONFIG mode, try MODULE mode find_package(SCOTCH REQUIRED) else() find_package(SCOTCH) endif() endif() if(TARGET SCOTCH::scotch AND NOT TARGET SCOTCH::ptscotch) message( STATUS "SCOTCH found, but not PT-SCOTCH (parallel). Not enabling SCOTCH." ) set(SCOTCH_FOUND FALSE) endif() endif() if(DOLFINX_ENABLE_PARMETIS AND _REQUIRE_PARMETIS) find_package(ParMETIS 4.0.2 REQUIRED) elseif(DOLFINX_ENABLE_PARMETIS) find_package(ParMETIS 4.0.2) endif() if(DOLFINX_ENABLE_KAHIP AND _REQUIRE_KAHIP) find_package(KaHIP REQUIRED) elseif(DOLFINX_ENABLE_KAHIP) find_package(KaHIP) endif() # ------------------------------------------------------------------------------ # Print summary of found and not found optional packages feature_summary(WHAT ALL) # Check that at least one graph partitioner has been found if(NOT SCOTCH_FOUND AND NOT PARMETIS_FOUND AND NOT KAHIP_FOUND ) message( FATAL_ERROR "No graph partitioner found. SCOTCH, ParMETIS or KaHIP is required." ) endif() # ------------------------------------------------------------------------------ # Installation of DOLFINx library add_subdirectory(dolfinx) # ------------------------------------------------------------------------------ # Generate and install helper file dolfinx.conf # FIXME: Can CMake provide the library path name variable? 
if(APPLE) set(OS_LIBRARY_PATH_NAME "DYLD_LIBRARY_PATH") else() set(OS_LIBRARY_PATH_NAME "LD_LIBRARY_PATH") endif() # FIXME: not cross-platform compatible Create and install dolfinx.conf file configure_file( ${DOLFINX_SOURCE_DIR}/cmake/templates/dolfinx.conf.in ${CMAKE_BINARY_DIR}/dolfinx.conf @ONLY ) install( FILES ${CMAKE_BINARY_DIR}/dolfinx.conf DESTINATION ${CMAKE_INSTALL_LIBDIR}/dolfinx COMPONENT Development ) # ------------------------------------------------------------------------------ # Install the demo source files install( DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/demo DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/dolfinx FILES_MATCHING PATTERN "CMakeLists.txt" PATTERN "*.h" PATTERN "*.hpp" PATTERN "*.c" PATTERN "*.cpp" PATTERN "*.py" PATTERN "*.xdmf" PATTERN "*.h5" PATTERN "CMakeFiles" EXCLUDE ) # ------------------------------------------------------------------------------ # Add "make uninstall" target configure_file( "${DOLFINX_SOURCE_DIR}/cmake/templates/cmake_uninstall.cmake.in" "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake" IMMEDIATE @ONLY ) add_custom_target( uninstall "${CMAKE_COMMAND}" -P "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake" ) # ------------------------------------------------------------------------------ # Print post-install message add_subdirectory(cmake/post-install) # ------------------------------------------------------------------------------ fenics-dolfinx-0.9.0/cpp/cmake/000077500000000000000000000000001470520666000163005ustar00rootroot00000000000000fenics-dolfinx-0.9.0/cpp/cmake/modules/000077500000000000000000000000001470520666000177505ustar00rootroot00000000000000fenics-dolfinx-0.9.0/cpp/cmake/modules/FindKaHIP.cmake000066400000000000000000000074221470520666000224540ustar00rootroot00000000000000#============================================================================= # - Try to find KaHIP # Once done this will define # # KAHIP_FOUND - system has KaHIP # KAHIP_INCLUDE_DIRS - include directories for KaHIP # 
KAHIP_LIBRARIES - libraries for KaHIP # KAHIP_VERSION - version for KaHIP # #============================================================================= # Copyright (C) 2019 Igor A. Baratta # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
#============================================================================= set(KAHIP_FOUND FALSE) message(STATUS "Checking for package 'KaHIP'") if(MPI_CXX_FOUND) find_path( KAHIP_INCLUDE_DIRS parhip_interface.h HINTS ${KAHIP_DIR}/include $ENV{KAHIP_DIR}/include PATH_SUFFIXES kahip ) find_library( PARHIP_LIBRARY parhip_interface HINTS ${KAHIP_DIR}/lib $ENV{KAHIP_DIR}/lib ) find_library(KAHIP_LIBRARY kahip HINTS ${KAHIP_DIR}/lib $ENV{KAHIP_DIR}/lib) set(KAHIP_LIBRARIES ${PARHIP_LIBRARY} ${KAHIP_LIBRARY}) include(FindPackageHandleStandardArgs) if(DOLFINX_SKIP_BUILD_TESTS) find_package_handle_standard_args( KaHIP "KaHIP could not be found/configured." KAHIP_INCLUDE_DIRS KAHIP_LIBRARIES ) else() if(KAHIP_LIBRARIES AND KAHIP_LIBRARIES) # Build and run test program include(CheckCXXSourceRuns) # Set flags for building test program set(CMAKE_REQUIRED_INCLUDES ${KAHIP_INCLUDE_DIRS} ${MPI_CXX_INCLUDE_PATH}) set(CMAKE_REQUIRED_LIBRARIES ${KAHIP_LIBRARIES} ${MPI_CXX_LIBRARIES}) set(CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS} ${MPI_CXX_COMPILE_FLAGS}) check_cxx_source_runs( " #define MPICH_IGNORE_CXX_SEEK 1 #include #include #include int main() { int n = 5; std::vector xadj = {0, 2, 5, 7, 9, 12}; std::vector adjncy = {1, 4, 0, 2, 4, 1, 3, 2, 4, 0, 1, 3}; std::vector part(n); double imbalance = 0.03; int edge_cut = 0; int nparts = 2; int *vwgt = nullptr;; int *adjcwgt = nullptr;; kaffpa(&n, vwgt, xadj.data(), adjcwgt, adjncy.data(), &nparts, &imbalance, false, 0, ECO, &edge_cut, part.data()); return 0; } " KAHIP_TEST_RUNS ) endif() find_package_handle_standard_args( KaHIP "KaHIP could not be found/configured." 
KAHIP_INCLUDE_DIRS KAHIP_LIBRARIES KAHIP_TEST_RUNS ) endif() endif() fenics-dolfinx-0.9.0/cpp/cmake/modules/FindParMETIS.cmake000066400000000000000000000136141470520666000231040ustar00rootroot00000000000000#============================================================================= # - Try to find ParMETIS # Once done this will define # # PARMETIS_FOUND - system has ParMETIS # PARMETIS_INCLUDE_DIRS - include directories for ParMETIS # PARMETIS_LIBRARIES - libraries for ParMETIS # PARMETIS_VERSION - version for ParMETIS # #============================================================================= # Copyright (C) 2010 Garth N. Wells, Anders Logg and Johannes Ring # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
#============================================================================= if(MPI_CXX_FOUND) find_path( PARMETIS_INCLUDE_DIRS parmetis.h HINTS ${PARMETIS_ROOT}/include $ENV{PARMETIS_ROOT}/include ${PETSC_INCLUDE_DIRS} DOC "Directory where the ParMETIS header files are located" ) find_library( PARMETIS_LIBRARY parmetis HINTS ${PARMETIS_ROOT}/lib $ENV{PARMETIS_ROOT}/lib ${PETSC_LIBRARY_DIRS} NO_DEFAULT_PATH DOC "Directory where the ParMETIS library is located" ) find_library( PARMETIS_LIBRARY parmetis DOC "Directory where the ParMETIS library is located" ) find_library( METIS_LIBRARY metis HINTS ${PARMETIS_ROOT}/lib $ENV{PARMETIS_ROOT}/lib ${PETSC_LIBRARY_DIRS} NO_DEFAULT_PATH DOC "Directory where the METIS library is located" ) find_library( METIS_LIBRARY metis DOC "Directory where the METIS library is located" ) # Newer METIS and ParMETIS build against separate GKLib find_library( GKLIB_LIBRARY gklib HINTS ${PARMETIS_ROOT}/lib $ENV{PARMETIS_ROOT}/lib ${PETSC_LIBRARY_DIRS} NO_DEFAULT_PATH DOC "Directory where the gklib library is located" ) find_library( GKLIB_LIBRARY gklib DOC "Directory where the GKLib library is located" ) set(PARMETIS_LIBRARIES ${PARMETIS_LIBRARY}) if(METIS_LIBRARY) set(PARMETIS_LIBRARIES ${PARMETIS_LIBRARIES} ${METIS_LIBRARY}) endif() if(GKLIB_LIBRARY) set(PARMETIS_LIBRARIES ${PARMETIS_LIBRARIES} ${METIS_LIBRARY} ${GKLIB_LIBRARY} ) endif() # Try compiling and running test program if(DOLFINX_SKIP_BUILD_TESTS) set(PARMETIS_TEST_RUNS TRUE) set(PARMETIS_VERSION "UNKNOWN") set(PARMETIS_VERSION_OK TRUE) elseif(PARMETIS_INCLUDE_DIRS AND PARMETIS_LIBRARY) # Set flags for building test program set(CMAKE_REQUIRED_INCLUDES ${PARMETIS_INCLUDE_DIRS} ${MPI_CXX_INCLUDE_PATH} ) set(CMAKE_REQUIRED_LIBRARIES ${PARMETIS_LIBRARIES} ${MPI_CXX_LIBRARIES}) set(CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS} ${MPI_CXX_COMPILE_FLAGS}) # Check ParMETIS version set(PARMETIS_CONFIG_TEST_VERSION_CPP "${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/parmetis_config_test_version.cpp" 
) file( WRITE ${PARMETIS_CONFIG_TEST_VERSION_CPP} " #define MPICH_IGNORE_CXX_SEEK 1 #include #include \"parmetis.h\" int main() { #ifdef PARMETIS_SUBMINOR_VERSION std::cout << PARMETIS_MAJOR_VERSION << \".\" << PARMETIS_MINOR_VERSION << \".\" << PARMETIS_SUBMINOR_VERSION; #else std::cout << PARMETIS_MAJOR_VERSION << \".\" << PARMETIS_MINOR_VERSION; #endif return 0; } " ) try_run( PARMETIS_CONFIG_TEST_VERSION_EXITCODE PARMETIS_CONFIG_TEST_VERSION_COMPILED ${CMAKE_CURRENT_BINARY_DIR} ${PARMETIS_CONFIG_TEST_VERSION_CPP} CMAKE_FLAGS "-DINCLUDE_DIRECTORIES:STRING=${CMAKE_REQUIRED_INCLUDES}" "-DLINK_LIBRARIES:STRING=${CMAKE_REQUIRED_LIBRARIES}" COMPILE_OUTPUT_VARIABLE PARMETIS_CONFIG_TEST_VERSION_COMPILE_OUTPUT RUN_OUTPUT_VARIABLE PARMETIS_CONFIG_TEST_VERSION_OUTPUT ) if(PARMETIS_CONFIG_TEST_VERSION_EXITCODE EQUAL 0) set(PARMETIS_VERSION ${PARMETIS_CONFIG_TEST_VERSION_OUTPUT}) mark_as_advanced(PARMETIS_VERSION) endif() if(ParMETIS_FIND_VERSION) # Check if version found is >= required version if(NOT "${PARMETIS_VERSION}" VERSION_LESS "${ParMETIS_FIND_VERSION}") set(PARMETIS_VERSION_OK TRUE) endif() else() # No specific version requested set(PARMETIS_VERSION_OK TRUE) endif() mark_as_advanced(PARMETIS_VERSION_OK) # Build and run test program include(CheckCXXSourceRuns) check_cxx_source_runs( " #define MPICH_IGNORE_CXX_SEEK 1 #include #include int main() { // FIXME: Find a simple but sensible test for ParMETIS return 0; } " PARMETIS_TEST_RUNS ) endif() endif() # Standard package handling find_package_handle_standard_args( ParMETIS "ParMETIS could not be found/configured." 
PARMETIS_LIBRARIES PARMETIS_TEST_RUNS PARMETIS_INCLUDE_DIRS PARMETIS_VERSION PARMETIS_VERSION_OK ) fenics-dolfinx-0.9.0/cpp/cmake/modules/FindSCOTCH.cmake000066400000000000000000000322551470520666000225450ustar00rootroot00000000000000#============================================================================= # - Try to find SCOTCH # Once done this will define # # SCOTCH_FOUND - system has found SCOTCH # SCOTCH_INCLUDE_DIRS - include directories for SCOTCH # SCOTCH_LIBARIES - libraries for SCOTCH # SCOTCH_VERSION - version for SCOTCH # # Variables used by this module, they can change the default behaviour and # need to be set before calling find_package: # # SCOTCH_DEBUG - Set this to TRUE to enable debugging output # of FindScotchPT.cmake if you are having problems. # Please enable this before filing any bug reports. # #============================================================================= # Copyright (C) 2010-2011 Garth N. Wells, Johannes Ring and Anders Logg # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE # COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. #============================================================================= set(SCOTCH_FOUND FALSE) message(STATUS "Checking for package 'SCOTCH-PT'") # Check for header file find_path( SCOTCH_INCLUDE_DIRS ptscotch.h HINTS ${SCOTCH_DIR}/include $ENV{SCOTCH_DIR}/include ${SCOTCH_ROOT}/include $ENV{SCOTCH_ROOT}/include ${PETSC_INCLUDE_DIRS} PATH_SUFFIXES scotch DOC "Directory where the SCOTCH-PT header is located" ) # Check for scotch find_library( SCOTCH_LIBRARY NAMES scotch HINTS ${SCOTCH_DIR}/lib $ENV{SCOTCH_DIR}/lib ${SCOTCH_ROOT}/lib $ENV{SCOTCH_ROOT}/lib ${PETSC_LIBRARY_DIRS} NO_DEFAULT_PATH DOC "The SCOTCH library" ) find_library( SCOTCH_LIBRARY NAMES scotch DOC "The SCOTCH library" ) # Check for scotcherr find_library( SCOTCHERR_LIBRARY NAMES scotcherr HINTS ${SCOTCH_DIR}/lib $ENV{SCOTCH_DIR}/lib ${SCOTCH_ROOT}/lib $ENV{SCOTCH_ROOT}/lib NO_DEFAULT_PATH DOC "The SCOTCH-ERROR library" ) find_library( SCOTCHERR_LIBRARY NAMES scotcherr DOC "The SCOTCH-ERROR library" ) # Check for ptscotch find_library( PTSCOTCH_LIBRARY NAMES ptscotch HINTS ${SCOTCH_DIR}/lib $ENV{SCOTCH_DIR}/lib ${SCOTCH_ROOT}/lib $ENV{SCOTCH_ROOT}/lib ${PETSC_LIBRARY_DIRS} NO_DEFAULT_PATH DOC "The PTSCOTCH library" ) find_library( PTSCOTCH_LIBRARY NAMES ptscotch DOC "The PTSCOTCH library" ) # Check for ptesmumps find_library( PTESMUMPS_LIBRARY NAMES ptesmumps esmumps HINTS ${SCOTCH_DIR}/lib $ENV{SCOTCH_DIR}/lib ${SCOTCH_ROOT}/lib $ENV{SCOTCH_ROOT}/lib ${PETSC_LIBRARY_DIRS} 
NO_DEFAULT_PATH DOC "The PTSCOTCH-ESMUMPS library" ) find_library( PTESMUMPS_LIBRARY NAMES ptesmumps esmumps DOC "The PTSCOTCH-ESMUMPS library" ) # Check for ptscotcherr find_library( PTSCOTCHERR_LIBRARY NAMES ptscotcherr HINTS ${SCOTCH_DIR}/lib $ENV{SCOTCH_DIR}/lib ${SCOTCH_ROOT}/lib $ENV{SCOTCH_ROOT}/lib ${PETSC_LIBRARY_DIRS} NO_DEFAULT_PATH DOC "The PTSCOTCH-ERROR library" ) find_library( PTSCOTCHERR_LIBRARY NAMES ptscotcherr DOC "The PTSCOTCH-ERROR library" ) set(SCOTCH_LIBRARIES ${PTSCOTCH_LIBRARY}) if(PTESMUMPS_LIBRARY) set(SCOTCH_LIBRARIES ${SCOTCH_LIBRARIES} ${PTESMUMPS_LIBRARY}) endif() set(SCOTCH_LIBRARIES ${SCOTCH_LIBRARIES} ${PTSCOTCHERR_LIBRARY}) # Basic check of SCOTCH_VERSION which does not require compilation if(SCOTCH_INCLUDE_DIRS) file(STRINGS "${SCOTCH_INCLUDE_DIRS}/ptscotch.h" PTSCOTCH_H) string(REGEX MATCH "SCOTCH_VERSION [0-9]+" SCOTCH_VERSION "${PTSCOTCH_H}") string(REGEX MATCH "[0-9]+" SCOTCH_VERSION "${SCOTCH_VERSION}") endif() # If SCOTCH_VERSION was not found in ptscotch.h, look in scotch.h if(SCOTCH_INCLUDE_DIRS AND NOT SCOTCH_VERSION) file(STRINGS "${SCOTCH_INCLUDE_DIRS}/scotch.h" SCOTCH_H) string(REGEX MATCH "SCOTCH_VERSION [0-9]+" SCOTCH_VERSION "${SCOTCH_H}") string(REGEX MATCH "[0-9]+" SCOTCH_VERSION "${SCOTCH_VERSION}") endif() # For SCOTCH version > 6, need to add libraries scotch and ptscotch if(NOT "${SCOTCH_VERSION}" VERSION_LESS "6") set(SCOTCH_LIBRARIES ${PTSCOTCH_LIBRARY} ${SCOTCH_LIBRARY} ${PTSCOTCHERR_LIBRARY} ) set(CMAKE_REQUIRED_LIBRARIES ${CMAKE_REQUIRED_LIBRARIES} ${SCOTCH_LIBRARY}) endif() # Try compiling and running test program if(DOLFINX_SKIP_BUILD_TESTS) message(STATUS "Found SCOTCH (version ${SCOTCH_VERSION})") set(SCOTCH_TEST_RUNS TRUE) elseif(SCOTCH_INCLUDE_DIRS AND SCOTCH_LIBRARIES) if(SCOTCH_DEBUG) message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " "location of ptscotch.h: ${SCOTCH_INCLUDE_DIRS}/ptscotch.h" ) message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " 
"location of libscotch: ${SCOTCH_LIBRARY}" ) message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " "location of libptscotch: ${PTSCOTCH_LIBRARY}" ) message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " "location of libptscotcherr: ${PTSCOTCHERR_LIBRARY}" ) endif() # Set flags for building test program set(CMAKE_REQUIRED_INCLUDES ${SCOTCH_INCLUDE_DIRS}) set(CMAKE_REQUIRED_LIBRARIES ${SCOTCH_LIBRARIES}) # set(CMAKE_REQUIRED_LIBRARIES ${SCOTCH_LIBRARY} ${SCOTCHERR_LIBRARY}) # Add MPI variables if MPI has been found if(MPI_CXX_FOUND) set(CMAKE_REQUIRED_INCLUDES ${CMAKE_REQUIRED_INCLUDES} ${MPI_CXX_INCLUDE_PATH} ) set(CMAKE_REQUIRED_LIBRARIES ${CMAKE_REQUIRED_LIBRARIES} ${MPI_CXX_LIBRARIES} ) set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${MPI_CXX_COMPILE_FLAGS}") endif() set(SCOTCH_CONFIG_TEST_VERSION_CPP "${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/scotch_config_test_version.cpp" ) file( WRITE ${SCOTCH_CONFIG_TEST_VERSION_CPP} " #define MPICH_IGNORE_CXX_SEEK 1 #include #include #include #include #include int main() { std::cout << SCOTCH_VERSION << \".\" << SCOTCH_RELEASE << \".\" << SCOTCH_PATCHLEVEL; return 0; } " ) try_run( SCOTCH_CONFIG_TEST_VERSION_EXITCODE SCOTCH_CONFIG_TEST_VERSION_COMPILED ${CMAKE_CURRENT_BINARY_DIR} ${SCOTCH_CONFIG_TEST_VERSION_CPP} CMAKE_FLAGS "-DINCLUDE_DIRECTORIES:STRING=${CMAKE_REQUIRED_INCLUDES}" "-DLINK_LIBRARIES:STRING=${CMAKE_REQUIRED_LIBRARIES}" COMPILE_OUTPUT_VARIABLE COMPILE_OUTPUT RUN_OUTPUT_VARIABLE OUTPUT ) # Set version number if(SCOTCH_CONFIG_TEST_VERSION_EXITCODE EQUAL 0) set(SCOTCH_VERSION ${OUTPUT}) message(STATUS "Found SCOTCH (version ${SCOTCH_VERSION})") endif() # PT-SCOTCH was first introduced in SCOTCH version 5.0 FIXME: parallel graph # partitioning features in PT-SCOTCH was first introduced in 5.1. Do we # require version 5.1? 
if(NOT ${SCOTCH_VERSION} VERSION_LESS "5.0") set(SCOTCH_TEST_LIB_CPP "${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/scotch_test_lib.cpp" ) file( WRITE ${SCOTCH_TEST_LIB_CPP} " #define MPICH_IGNORE_CXX_SEEK 1 #include #include #include #include #include #include int main() { int provided; SCOTCH_Dgraph dgrafdat; MPI_Init_thread(0, 0, MPI_THREAD_MULTIPLE, &provided); if (SCOTCH_dgraphInit(&dgrafdat, MPI_COMM_WORLD) != 0) { if (MPI_THREAD_MULTIPLE > provided) { std::cout << \"MPI implementation is not thread-safe:\" << std::endl; std::cout << \"SCOTCH should be compiled without SCOTCH_PTHREAD\" << std::endl; exit(1); } else { std::cout << \"libptscotch linked to libscotch or other unknown error\" << std::endl; exit(2); } } else { SCOTCH_dgraphExit(&dgrafdat); } MPI_Finalize(); return 0; } " ) message(STATUS "Performing test SCOTCH_TEST_RUNS") try_run( SCOTCH_TEST_LIB_EXITCODE SCOTCH_TEST_LIB_COMPILED ${CMAKE_CURRENT_BINARY_DIR} ${SCOTCH_TEST_LIB_CPP} CMAKE_FLAGS "-DINCLUDE_DIRECTORIES:STRING=${CMAKE_REQUIRED_INCLUDES}" "-DLINK_LIBRARIES:STRING=${CMAKE_REQUIRED_LIBRARIES}" COMPILE_OUTPUT_VARIABLE SCOTCH_TEST_LIB_COMPILE_OUTPUT RUN_OUTPUT_VARIABLE SCOTCH_TEST_LIB_OUTPUT ) if(SCOTCH_TEST_LIB_COMPILED AND SCOTCH_TEST_LIB_EXITCODE EQUAL 0) message(STATUS "Performing test SCOTCH_TEST_RUNS - Success") set(SCOTCH_TEST_RUNS TRUE) else() message(STATUS "Performing test SCOTCH_TEST_RUNS - Failed") if(SCOTCH_DEBUG) # Output some variables message( STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " "SCOTCH_TEST_LIB_COMPILED = ${SCOTCH_TEST_LIB_COMPILED}" ) message( STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " "SCOTCH_TEST_LIB_COMPILE_OUTPUT = ${SCOTCH_TEST_LIB_COMPILE_OUTPUT}" ) message( STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " "SCOTCH_TEST_LIB_EXITCODE = ${SCOTCH_TEST_LIB_EXITCODE}" ) message( STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " "SCOTCH_TEST_LIB_OUTPUT = ${SCOTCH_TEST_LIB_OUTPUT}" ) 
endif() endif() # If program does not run, try adding zlib library and test again if(NOT SCOTCH_TEST_RUNS) if(NOT ZLIB_FOUND) find_package(ZLIB) endif() if(ZLIB_INCLUDE_DIRS AND ZLIB_LIBRARIES) set(CMAKE_REQUIRED_INCLUDES ${CMAKE_REQUIRED_INCLUDES} ${ZLIB_INCLUDE_DIRS} ) set(CMAKE_REQUIRED_LIBRARIES ${CMAKE_REQUIRED_LIBRARIES} ${ZLIB_LIBRARIES} ) message(STATUS "Performing test SCOTCH_ZLIB_TEST_RUNS") try_run( SCOTCH_ZLIB_TEST_LIB_EXITCODE SCOTCH_ZLIB_TEST_LIB_COMPILED ${CMAKE_CURRENT_BINARY_DIR} ${SCOTCH_TEST_LIB_CPP} CMAKE_FLAGS "-DINCLUDE_DIRECTORIES:STRING=${CMAKE_REQUIRED_INCLUDES}" "-DLINK_LIBRARIES:STRING=${CMAKE_REQUIRED_LIBRARIES}" COMPILE_OUTPUT_VARIABLE SCOTCH_ZLIB_TEST_LIB_COMPILE_OUTPUT RUN_OUTPUT_VARIABLE SCOTCH_ZLIB_TEST_LIB_OUTPUT ) # Add zlib flags if required and set test run to 'true' if(SCOTCH_ZLIB_TEST_LIB_COMPILED AND SCOTCH_ZLIB_TEST_LIB_EXITCODE EQUAL 0 ) message(STATUS "Performing test SCOTCH_ZLIB_TEST_RUNS - Success") set(SCOTCH_INCLUDE_DIRS ${SCOTCH_INCLUDE_DIRS} ${ZLIB_INCLUDE_DIRS}) set(SCOTCH_LIBRARIES ${SCOTCH_LIBRARIES} ${ZLIB_LIBRARIES}) set(SCOTCH_TEST_RUNS TRUE) else() message(STATUS "Performing test SCOTCH_ZLIB_TEST_RUNS - Failed") if(SCOTCH_DEBUG) message( STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " "SCOTCH_ZLIB_TEST_LIB_COMPILED = ${SCOTCH_ZLIB_TEST_LIB_COMPILED}" ) message( STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " "SCOTCH_ZLIB_TEST_LIB_COMPILE_OUTPUT = ${SCOTCH_ZLIB_TEST_LIB_COMPILE_OUTPUT}" ) message( STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " "SCOTCH_TEST_LIB_EXITCODE = ${SCOTCH_TEST_LIB_EXITCODE}" ) message( STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " "SCOTCH_TEST_LIB_OUTPUT = ${SCOTCH_TEST_LIB_OUTPUT}" ) endif() endif() endif() endif() endif() endif() # Standard package handling find_package_handle_standard_args( SCOTCH "SCOTCH could not be found. Be sure to set SCOTCH_DIR." 
SCOTCH_LIBRARIES SCOTCH_INCLUDE_DIRS SCOTCH_TEST_RUNS ) if(SCOTCH_FOUND AND NOT TARGET SCOTCH::ptscotch) add_library(SCOTCH::ptscotch INTERFACE IMPORTED) set_property(TARGET SCOTCH::ptscotch PROPERTY INTERFACE_LINK_LIBRARIES "${SCOTCH_LIBRARIES}") set_property(TARGET SCOTCH::ptscotch PROPERTY INTERFACE_INCLUDE_DIRECTORIES "${SCOTCH_INCLUDE_DIRS}") endif() fenics-dolfinx-0.9.0/cpp/cmake/modules/FindUFCx.cmake000066400000000000000000000060131470520666000223600ustar00rootroot00000000000000#============================================================================= # - Try to find UFCx by interrogating the Python module FFCx # Once done this will define # # UFCX_FOUND - system has UFCx # UFCX_INCLUDE_DIRS - include directories for UFCx # UFCX_SIGNATURE - signature for UFCx # UFCX_VERSION - version for UFCx # #============================================================================= # Copyright (C) 2010-2021 Johannes Ring and Garth N. Wells # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE # COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. #============================================================================= find_package( Python3 COMPONENTS Interpreter REQUIRED ) message( STATUS "Asking Python module FFCx for location of ufcx.h..." ) # Get include path execute_process( COMMAND ${Python3_EXECUTABLE} -c "import ffcx.codegeneration, sys; sys.stdout.write(ffcx.codegeneration.get_include_path())" OUTPUT_VARIABLE UFCX_INCLUDE_DIR ) # Converts os native to cmake native path type cmake_path(SET UFCX_INCLUDE_DIR "${UFCX_INCLUDE_DIR}") # Get ufcx.h version if(UFCX_INCLUDE_DIR) set(UFCX_INCLUDE_DIRS ${UFCX_INCLUDE_DIR} CACHE STRING "Where to find ufcx.h" ) execute_process( COMMAND ${Python3_EXECUTABLE} -c "import ffcx, sys; sys.stdout.write(ffcx.__version__)" OUTPUT_VARIABLE UFCX_VERSION ) endif() # Compute hash of ufcx.h find_file(_UFCX_HEADER "ufcx.h" ${UFCX_INCLUDE_DIR}) if(_UFCX_HEADER) file(SHA1 ${_UFCX_HEADER} UFCX_SIGNATURE) endif() mark_as_advanced(UFCX_VERSION UFCX_INCLUDE_DIRS UFCX_SIGNATURE) find_package_handle_standard_args( UFCx REQUIRED_VARS UFCX_INCLUDE_DIRS UFCX_SIGNATURE UFCX_VERSION VERSION_VAR UFCX_VERSION HANDLE_VERSION_RANGE REASON_FAILURE_MESSAGE "UFCx could not be found." 
) fenics-dolfinx-0.9.0/cpp/cmake/post-install/000077500000000000000000000000001470520666000207315ustar00rootroot00000000000000fenics-dolfinx-0.9.0/cpp/cmake/post-install/CMakeLists.txt000066400000000000000000000012651470520666000234750ustar00rootroot00000000000000install( CODE "MESSAGE( \"---------------------------------------------------------------------------- DOLFINx has now been installed in ${CMAKE_INSTALL_PREFIX} and demo programs have been installed in ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_DATAROOTDIR}/dolfinx/demo Don't forget to update your environment variables. This can be done easily using the helper file 'dolfinx.conf' which sets the appropriate variables (for users of the Bash shell). To update your environment variables, run the following command: source ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}/dolfinx/dolfinx.conf ----------------------------------------------------------------------------\")" ) fenics-dolfinx-0.9.0/cpp/cmake/scripts/000077500000000000000000000000001470520666000177675ustar00rootroot00000000000000fenics-dolfinx-0.9.0/cpp/cmake/scripts/generate-cmakefiles.py000066400000000000000000000215701470520666000242410ustar00rootroot00000000000000# Copyright (C) 2017-2018 Chris N. Richardson and Garth N. 
Wells # # This file is part of DOLFINx (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later import os import warnings cmakelists_noufl_str = """# This file was generated by running # # python cmake/scripts/generate-cmakefiles.py from dolfinx/cpp # cmake_minimum_required(VERSION 3.19) set(PROJECT_NAME {project_name}) project(${{PROJECT_NAME}} LANGUAGES C CXX) # Set C++20 standard set(CMAKE_CXX_STANDARD 20) set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) if(NOT TARGET dolfinx) find_package(DOLFINX REQUIRED) endif() set(CMAKE_INCLUDE_CURRENT_DIR ON) add_executable(${{PROJECT_NAME}} {src_files}) target_link_libraries(${{PROJECT_NAME}} dolfinx) # Do not throw error for 'multi-line comments' (these are typical in rst which # includes LaTeX) include(CheckCXXCompilerFlag) check_cxx_compiler_flag("-Wno-comment" HAVE_NO_MULTLINE) set_source_files_properties( main.cpp PROPERTIES COMPILE_FLAGS "$<$:-Wno-comment -Wall -Wextra -pedantic -Werror>" ) # Test targets (used by DOLFINx testing system) set(TEST_PARAMETERS2 -np 2 ${{MPIEXEC_PARAMS}} "./${{PROJECT_NAME}}") set(TEST_PARAMETERS3 -np 3 ${{MPIEXEC_PARAMS}} "./${{PROJECT_NAME}}") add_test(NAME ${{PROJECT_NAME}}_mpi_2 COMMAND "mpirun" ${{TEST_PARAMETERS2}}) add_test(NAME ${{PROJECT_NAME}}_mpi_3 COMMAND "mpirun" ${{TEST_PARAMETERS3}}) add_test(NAME ${{PROJECT_NAME}}_serial COMMAND ${{PROJECT_NAME}}) """ cmakelists_nocomplex_str = """# This file was generated by running # # python cmake/scripts/generate-cmakefiles from dolfinx/cpp # cmake_minimum_required(VERSION 3.19) set(PROJECT_NAME {project_name}) project(${{PROJECT_NAME}} LANGUAGES C CXX) # Set C++20 standard set(CMAKE_CXX_STANDARD 20) set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) if(NOT TARGET dolfinx) find_package(DOLFINX REQUIRED) endif() include(CheckSymbolExists) set(CMAKE_REQUIRED_INCLUDES ${{PETSC_INCLUDE_DIRS}}) check_symbol_exists(PETSC_USE_COMPLEX petscsystypes.h PETSC_SCALAR_COMPLEX) 
if(PETSC_SCALAR_COMPLEX EQUAL 1) message(STATUS "** This demo does not support complex mode") else() check_symbol_exists(PETSC_USE_REAL_DOUBLE petscsystypes.h PETSC_REAL_DOUBLE) if(PETSC_REAL_DOUBLE EQUAL 1) set(SCALAR_TYPE "--scalar_type=float64") else() set(SCALAR_TYPE "--scalar_type=float32") endif() # Add target to compile UFL files add_custom_command( OUTPUT {ufl_c_files} COMMAND ffcx ${{CMAKE_CURRENT_SOURCE_DIR}}/{ufl_files} ${{SCALAR_TYPE}} VERBATIM DEPENDS {ufl_files} COMMENT "Compile {ufl_files} using FFCx" ) set(CMAKE_INCLUDE_CURRENT_DIR ON) add_executable(${{PROJECT_NAME}} {src_files} ${{CMAKE_CURRENT_BINARY_DIR}}/{ufl_c_files}) target_link_libraries(${{PROJECT_NAME}} dolfinx) # Do not throw error for 'multi-line comments' (these are typical in rst which # includes LaTeX) include(CheckCXXCompilerFlag) check_cxx_compiler_flag("-Wno-comment" HAVE_NO_MULTLINE) set_source_files_properties( main.cpp PROPERTIES COMPILE_FLAGS "$<$:-Wno-comment -Wall -Wextra -pedantic -Werror>" ) # Test targets (used by DOLFINx testing system) set(TEST_PARAMETERS2 -np 2 ${{MPIEXEC_PARAMS}} "./${{PROJECT_NAME}}") set(TEST_PARAMETERS3 -np 3 ${{MPIEXEC_PARAMS}} "./${{PROJECT_NAME}}") add_test(NAME ${{PROJECT_NAME}}_mpi_2 COMMAND "mpirun" ${{TEST_PARAMETERS2}}) add_test(NAME ${{PROJECT_NAME}}_mpi_3 COMMAND "mpirun" ${{TEST_PARAMETERS3}}) add_test(NAME ${{PROJECT_NAME}}_serial COMMAND ${{PROJECT_NAME}}) endif() """ cmakelists_str = """# This file was generated by running # # python cmake/scripts/generate-cmakefiles from dolfinx/cpp # cmake_minimum_required(VERSION 3.19) set(PROJECT_NAME {project_name}) project(${{PROJECT_NAME}} LANGUAGES C CXX) # Set C++20 standard set(CMAKE_CXX_STANDARD 20) set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) if(NOT TARGET dolfinx) find_package(DOLFINX REQUIRED) endif() include(CheckSymbolExists) set(CMAKE_REQUIRED_INCLUDES ${{PETSC_INCLUDE_DIRS}}) check_symbol_exists(PETSC_USE_COMPLEX petscsystypes.h PETSC_SCALAR_COMPLEX) 
check_symbol_exists(PETSC_USE_REAL_DOUBLE petscsystypes.h PETSC_REAL_DOUBLE) # Add target to compile UFL files if(PETSC_SCALAR_COMPLEX EQUAL 1) if(PETSC_REAL_DOUBLE EQUAL 1) set(SCALAR_TYPE "--scalar_type=complex128") else() set(SCALAR_TYPE "--scalar_type=complex64") endif() else() if(PETSC_REAL_DOUBLE EQUAL 1) set(SCALAR_TYPE "--scalar_type=float64") else() set(SCALAR_TYPE "--scalar_type=float32") endif() endif() add_custom_command( OUTPUT {ufl_c_files} COMMAND ffcx ${{CMAKE_CURRENT_SOURCE_DIR}}/{ufl_files} ${{SCALAR_TYPE}} VERBATIM DEPENDS {ufl_files} COMMENT "Compile {ufl_files} using FFCx" ) set(CMAKE_INCLUDE_CURRENT_DIR ON) add_executable(${{PROJECT_NAME}} {src_files} ${{CMAKE_CURRENT_BINARY_DIR}}/{ufl_c_files}) target_link_libraries(${{PROJECT_NAME}} dolfinx) # Do not throw error for 'multi-line comments' (these are typical in rst which # includes LaTeX) include(CheckCXXCompilerFlag) check_cxx_compiler_flag("-Wno-comment" HAVE_NO_MULTLINE) set_source_files_properties( main.cpp PROPERTIES COMPILE_FLAGS "$<$:-Wno-comment -Wall -Wextra -pedantic -Werror>" ) # Test targets (used by DOLFINx testing system) set(TEST_PARAMETERS2 -np 2 ${{MPIEXEC_PARAMS}} "./${{PROJECT_NAME}}") set(TEST_PARAMETERS3 -np 3 ${{MPIEXEC_PARAMS}} "./${{PROJECT_NAME}}") add_test(NAME ${{PROJECT_NAME}}_mpi_2 COMMAND "mpirun" ${{TEST_PARAMETERS2}}) add_test(NAME ${{PROJECT_NAME}}_mpi_3 COMMAND "mpirun" ${{TEST_PARAMETERS3}}) add_test(NAME ${{PROJECT_NAME}}_serial COMMAND ${{PROJECT_NAME}}) """ # Subdirectories sub_directories = ["demo"] # Prefix map for subdirectories executable_prefixes = dict(demo="demo_") # Main file name map for subdirectories main_file_names = dict(demo=set(["main.cpp"])) # Projects that use custom CMakeLists.txt (shouldn't overwrite) exclude_projects = [] def generate_cmake_files(subdirectory, generated_files): """Search for C++ code and write CMakeLists.txt files""" cwd = os.getcwd() executable_prefix = executable_prefixes[subdirectory] main_file_name = 
main_file_names[subdirectory] for root, dirs, files in os.walk(cwd + "/" + subdirectory): cpp_files = set() ufl_files = set() ufl_c_files = set() executable_names = set() program_dir = root program_name = os.path.split(root)[-1] skip = False for exclude in exclude_projects: if exclude in root: skip = True if skip: print("Skipping custom CMakeLists.txt file:", root) continue if "main.cpp" in os.listdir(program_dir): name_forms = dict(project_name=executable_prefix + program_name, src_files="NOT_SET") for f in os.listdir(program_dir): filename, extension = os.path.splitext(f) if extension == ".cpp": cpp_files.add(f) elif extension == ".py": ufl_files.add(f) ufl_c_files.add(f.replace(".py", ".c")) if ".cpp.rst" in f: cpp_files.add(filename) # If no .cpp, continue if not cpp_files: continue if len(ufl_files) > 1: raise RuntimeError("CMake generation supports exactly one UFL file") # Name of demo and cpp source files # print("**, ", main_file_name, cpp_files) assert not main_file_name.isdisjoint(cpp_files) # If directory contains a main file we assume that only one # executable should be generated for this directory and all # other .cpp files should be linked to this name_forms["src_files"] = " ".join(cpp_files) name_forms["ufl_files"] = " ".join(ufl_files) name_forms["ufl_c_files"] = " ".join(ufl_c_files) # Check for duplicate executable names if program_name not in executable_names: executable_names.add(program_name) else: warnings.warn("Duplicate executable names found when generating CMakeLists.txt files.") # Write file filename = os.path.join(program_dir, "CMakeLists.txt") generated_files.append(filename) with open(filename, "w") as f: if program_name in ["hyperelasticity"]: f.write(cmakelists_nocomplex_str.format(**name_forms)) elif len(ufl_files) == 0: f.write(cmakelists_noufl_str.format(**name_forms)) else: f.write(cmakelists_str.format(**name_forms)) # Generate CMakeLists.txt files for all subdirectories generated_files = [] for subdirectory in sub_directories: 
generate_cmake_files(subdirectory, generated_files) # Print list of generated files print("The following files were generated:") print("\n".join(generated_files)) fenics-dolfinx-0.9.0/cpp/cmake/templates/000077500000000000000000000000001470520666000202765ustar00rootroot00000000000000fenics-dolfinx-0.9.0/cpp/cmake/templates/DOLFINXConfig.cmake.in000066400000000000000000000055311470520666000241420ustar00rootroot00000000000000# * Build details for DOLFINx: Dynamic Object-oriented Library for # * FINite element computation # # This file has been automatically generated. # FIXME: Check that naming conforms to CMake standards @PACKAGE_INIT@ include(CMakeFindDependencyMacro) find_dependency(MPI REQUIRED) find_dependency(spdlog REQUIRED) find_dependency(pugixml REQUIRED) if(POLICY CMP0167) cmake_policy(SET CMP0167 NEW) # Boost CONFIG mode endif() # Check for Boost if(DEFINED ENV{BOOST_ROOT} OR DEFINED BOOST_ROOT) set(Boost_NO_SYSTEM_PATHS on) endif() set(Boost_USE_MULTITHREADED $ENV{BOOST_USE_MULTITHREADED}) set(Boost_VERBOSE TRUE) find_package(Boost 1.70 REQUIRED timer) if(@ufcx_FOUND@) find_dependency(ufcx) endif() # Basix if(@DOLFINX_BASIX_PYTHON@) find_package( Python3 COMPONENTS Interpreter QUIET ) if(Python3_Interpreter_FOUND) message(STATUS "Checking for Basix hints with ${Python3_EXECUTABLE}") execute_process( COMMAND ${Python3_EXECUTABLE} -c "import basix, os, sys; sys.stdout.write(os.path.dirname(basix.__file__))" OUTPUT_VARIABLE BASIX_PY_DIR RESULT_VARIABLE BASIX_PY_COMMAND_RESULT ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE ) endif() if(BASIX_PY_DIR) # TODO: CMake 3.20 has more modern cmake_path. 
file(TO_CMAKE_PATH "${BASIX_PY_DIR}" BASIX_PY_DIR) message(STATUS "Adding ${BASIX_PY_DIR} to Basix search hints") else() message(STATUS "No Basix hint was found.") endif() endif() find_dependency(Basix REQUIRED CONFIG HINTS ${BASIX_PY_DIR}) # HDF5 if(NOT TARGET hdf5::hdf5) set(HDF5_PREFER_PARALLEL TRUE) set(HDF5_FIND_DEBUG TRUE) find_dependency(HDF5 COMPONENTS C) if(HDF5_FOUND AND NOT HDF5_IS_PARALLEL) message(FATAL_ERROR "Found serial HDF5 build, MPI HDF5 build required") endif() endif() if(@PETSC_FOUND@) if(NOT TARGET PkgConfig::PETSC) find_package(PkgConfig REQUIRED) set(ENV{PKG_CONFIG_PATH} "$ENV{PETSC_DIR}/$ENV{PETSC_ARCH}/lib/pkgconfig:$ENV{PETSC_DIR}/lib/pkgconfig:$ENV{PKG_CONFIG_PATH}" ) pkg_search_module(PETSC REQUIRED IMPORTED_TARGET PETSc petsc) endif() endif() if(@SLEPC_FOUND@) if(NOT TARGET PkgConfig::SLEPC) find_package(PkgConfig REQUIRED) set(ENV{PKG_CONFIG_PATH} "$ENV{SLEPC_DIR}/$ENV{PETSC_ARCH}/lib/pkgconfig:$ENV{SLEPC_DIR}/lib/pkgconfig:$ENV{PKG_CONFIG_PATH}" ) set(ENV{PKG_CONFIG_PATH} "$ENV{PETSC_DIR}/$ENV{PETSC_ARCH}/lib/pkgconfig:$ENV{PETSC_DIR}/lib/pkgconfig:$ENV{PKG_CONFIG_PATH}" ) set(ENV{PKG_CONFIG_PATH} "$ENV{PETSC_DIR}/$ENV{PETSC_ARCH}:$ENV{PETSC_DIR}:$ENV{PKG_CONFIG_PATH}" ) pkg_search_module(SLEPC REQUIRED IMPORTED_TARGET SLEPc slepc) endif() endif() if(@ADIOS2_FOUND@) find_dependency(ADIOS2 2.8.1) endif() if(NOT TARGET dolfinx) include("${CMAKE_CURRENT_LIST_DIR}/DOLFINXTargets.cmake") endif() check_required_components(DOLFINX) fenics-dolfinx-0.9.0/cpp/cmake/templates/cmake_uninstall.cmake.in000066400000000000000000000014471470520666000250640ustar00rootroot00000000000000if(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt") message( FATAL_ERROR "Cannot find install manifest: \"@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt\"" ) endif() file(READ "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt" files) string(REGEX REPLACE "\n" ";" files "${files}") foreach(file ${files}) message(STATUS "Uninstalling \"$ENV{DESTDIR}${file}\"") 
if(EXISTS "$ENV{DESTDIR}${file}") exec_program( "@CMAKE_COMMAND@" ARGS "-E remove \"$ENV{DESTDIR}${file}\"" OUTPUT_VARIABLE rm_out RETURN_VALUE rm_retval ) if(NOT "${rm_retval}" STREQUAL 0) message(FATAL_ERROR "Problem when removing \"$ENV{DESTDIR}${file}\"") endif() else() message(STATUS "File \"$ENV{DESTDIR}${file}\" does not exist.") endif() endforeach() fenics-dolfinx-0.9.0/cpp/cmake/templates/dolfinx.conf.in000066400000000000000000000010521470520666000232130ustar00rootroot00000000000000# Helper file for setting non-default DOLFINx environment variables # Common Unix variables export @OS_LIBRARY_PATH_NAME@=@CMAKE_INSTALL_PREFIX@/@CMAKE_INSTALL_LIBDIR@:$@OS_LIBRARY_PATH_NAME@ export PATH=@CMAKE_INSTALL_PREFIX@/@CMAKE_INSTALL_BINDIR@:$PATH export PKG_CONFIG_PATH=@CMAKE_INSTALL_PREFIX@/@CMAKE_INSTALL_LIBDIR@/pkgconfig:$PKG_CONFIG_PATH export CMAKE_PREFIX_PATH=@CMAKE_INSTALL_PREFIX@/@CMAKE_INSTALL_LIBDIR@/cmake:$CMAKE_PREFIX_PATH # Special macOS variables export DYLD_FRAMEWORK_PATH=/opt/local/Library/Frameworks:$DYLD_FRAMEWORK_PATH fenics-dolfinx-0.9.0/cpp/cmake/templates/dolfinx.pc.in000066400000000000000000000011171470520666000226720ustar00rootroot00000000000000# pkg-config configuration for DOLFINx prefix=@CMAKE_INSTALL_PREFIX@ exec_prefix=@CMAKE_INSTALL_PREFIX@ libdir=${exec_prefix}/@CMAKE_INSTALL_LIBDIR@ includedir=${prefix}/@CMAKE_INSTALL_INCLUDEDIR@ compiler=@CMAKE_CXX_COMPILER@ definitions=@PKG_DEFINITIONS@ extlibs=@DOLFINX_EXT_LIBS@ Name: DOLFINx Description: Dynamic Object-oriented Library for FINite element computation Version: @DOLFINX_VERSION@ Requires: @PKG_REQUIRES@ Conflicts: Libs: @PKG_LINKFLAGS@ -L${libdir} -ldolfinx Cflags: @PKG_CXXFLAGS@ -DDOLFINX_VERSION=\"@DOLFINX_VERSION@\" ${definitions} -I${includedir} @PKG_INCLUDES@ 
fenics-dolfinx-0.9.0/cpp/demo/000077500000000000000000000000001470520666000161445ustar00rootroot00000000000000fenics-dolfinx-0.9.0/cpp/demo/CMakeLists.txt000066400000000000000000000014131470520666000207030ustar00rootroot00000000000000cmake_minimum_required(VERSION 3.16) project(dolfinx-demos) # Find DOLFINx config file find_package(DOLFINX REQUIRED) # Enable testing enable_testing() # Macro to add demos. Some subdirectories might be skipped because demos may not # be running in both real and complex modes. macro(add_demo_subdirectory subdir) if(IS_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/${subdir}) add_subdirectory(${subdir}) endif() endmacro(add_demo_subdirectory) # Add demos add_demo_subdirectory(biharmonic) add_demo_subdirectory(codim_0_assembly) add_demo_subdirectory(custom_kernel) add_demo_subdirectory(poisson) add_demo_subdirectory(poisson_matrix_free) add_demo_subdirectory(hyperelasticity) add_demo_subdirectory(interpolation-io) add_demo_subdirectory(interpolation_different_meshes) fenics-dolfinx-0.9.0/cpp/demo/README.md000066400000000000000000000042321470520666000174240ustar00rootroot00000000000000Documenting DOLFINx demos ========================= The documentation for the DOLFINx demos is written by hand and located together with the demos in the DOLFINx source tree. To document a (new) DOLFINx demo located in the directory foo (for instance pde/poisson), follow the two steps below. In general, the simplest way is probably to look at one of the documented demos for instance (demo/pde/poisson/) and follow the same setup. 1) Add these 3 files * foo/common.txt -- containing common information such as the main features the demo illustrates and, if applicable, a mathematical description of the differential equation that is solved. This file should then be included in the C++ and Python versions. * foo/cpp/documentation.rst -- containing the reST source file with the documentation that is specific to the C++ version of the demo. 
* foo/python/documentation.rst -- containing the reST source file with the documentation that is specific to the Python version of the demo. If either the C++ or the Python version of the demo does not exist, feel free to add the version and continue. 2) Move the directory foo from the directory undocumented/ to the suitable directory (for instance pde/ or la/). Note The demo documentation is automatically included in the complete DOLFINx documentation when running make doc after building DOLFINx. While documenting a demo, it may be handy to only run make doc_demo and then make doc_html_[python|cpp]. Note Tests for the validity of the code snippets used in the demo documentation are included in the standard DOLFINx tests. C++ and Python specific contents ================================ The C++ and Python documentation reST source files should * Explain each step of the solution procedure. Do this by including and explaining code snippets from the demo source code. * Include links to the API documentation using the :cpp:class: and :py:class: directives. Note that for the Python classes, the full module path is required (for instance py:class:dolfinx.cpp.NewtonSolver) * Include the complete set of files needed to run the demo using the include directive. 
fenics-dolfinx-0.9.0/cpp/demo/biharmonic/000077500000000000000000000000001470520666000202575ustar00rootroot00000000000000fenics-dolfinx-0.9.0/cpp/demo/biharmonic/CMakeLists.txt000066400000000000000000000040501470520666000230160ustar00rootroot00000000000000# This file was generated by running # # python cmake/scripts/generate-cmakefiles from dolfinx/cpp # cmake_minimum_required(VERSION 3.19) set(PROJECT_NAME demo_biharmonic) project(${PROJECT_NAME} LANGUAGES C CXX) # Set C++20 standard set(CMAKE_CXX_STANDARD 20) set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) if(NOT TARGET dolfinx) find_package(DOLFINX REQUIRED) endif() include(CheckSymbolExists) set(CMAKE_REQUIRED_INCLUDES ${PETSC_INCLUDE_DIRS}) check_symbol_exists(PETSC_USE_COMPLEX petscsystypes.h PETSC_SCALAR_COMPLEX) check_symbol_exists(PETSC_USE_REAL_DOUBLE petscsystypes.h PETSC_REAL_DOUBLE) # Add target to compile UFL files if(PETSC_SCALAR_COMPLEX EQUAL 1) if(PETSC_REAL_DOUBLE EQUAL 1) set(SCALAR_TYPE "--scalar_type=complex128") else() set(SCALAR_TYPE "--scalar_type=complex64") endif() else() if(PETSC_REAL_DOUBLE EQUAL 1) set(SCALAR_TYPE "--scalar_type=float64") else() set(SCALAR_TYPE "--scalar_type=float32") endif() endif() add_custom_command( OUTPUT biharmonic.c COMMAND ffcx ${CMAKE_CURRENT_SOURCE_DIR}/biharmonic.py ${SCALAR_TYPE} VERBATIM DEPENDS biharmonic.py COMMENT "Compile biharmonic.py using FFCx" ) set(CMAKE_INCLUDE_CURRENT_DIR ON) add_executable(${PROJECT_NAME} main.cpp ${CMAKE_CURRENT_BINARY_DIR}/biharmonic.c) target_link_libraries(${PROJECT_NAME} dolfinx) # Do not throw error for 'multi-line comments' (these are typical in rst which # includes LaTeX) include(CheckCXXCompilerFlag) check_cxx_compiler_flag("-Wno-comment" HAVE_NO_MULTLINE) set_source_files_properties( main.cpp PROPERTIES COMPILE_FLAGS "$<$:-Wno-comment -Wall -Wextra -pedantic -Werror>" ) # Test targets (used by DOLFINx testing system) set(TEST_PARAMETERS2 -np 2 ${MPIEXEC_PARAMS} "./${PROJECT_NAME}") 
set(TEST_PARAMETERS3 -np 3 ${MPIEXEC_PARAMS} "./${PROJECT_NAME}") add_test(NAME ${PROJECT_NAME}_mpi_2 COMMAND "mpirun" ${TEST_PARAMETERS2}) add_test(NAME ${PROJECT_NAME}_mpi_3 COMMAND "mpirun" ${TEST_PARAMETERS3}) add_test(NAME ${PROJECT_NAME}_serial COMMAND ${PROJECT_NAME}) fenics-dolfinx-0.9.0/cpp/demo/biharmonic/biharmonic.py000066400000000000000000000042001470520666000227400ustar00rootroot00000000000000# The first step is to define the variational problem at hand. We define # the variational problem in UFL terms in a separate form file. We begin # by defining the finite element: from basix.ufl import element from ufl import ( CellDiameter, Coefficient, Constant, FacetNormal, FunctionSpace, Mesh, TestFunction, TrialFunction, avg, div, dS, dx, grad, inner, jump, ) e = element("Lagrange", "triangle", 2) # The first argument to :py:class:`FiniteElement` is the finite element # family, the second argument specifies the domain, while the third # argument specifies the polynomial degree. Thus, in this case, our # element `element` consists of second-order, continuous Lagrange basis # functions on triangles (or in order words, continuous piecewise linear # polynomials on triangles). # # Next, we use this element to initialize the trial and test functions # ($u$ and $v$) and the coefficient function $f$: coord_element = element("Lagrange", "triangle", 1, shape=(2,)) mesh = Mesh(coord_element) V = FunctionSpace(mesh, e) u = TrialFunction(V) v = TestFunction(V) f = Coefficient(V) # Next, the outward unit normal to cell boundaries and a measure of the # cell size are defined. The average size of cells sharing a facet will # be used (`h_avg`). The UFL syntax `('+')` and `('-')` restricts a # function to the `('+')` and `('-')` sides of a facet, respectively. # The penalty parameter `alpha` is made a :cpp:class:`Constant` so # that it can be changed in the program without regenerating the code. 
# Normal component, mesh size and right-hand side n = FacetNormal(mesh) h = CellDiameter(mesh) h_avg = (h("+") + h("-")) / 2 alpha = Constant(mesh) # Finally, we define the bilinear and linear forms according to the # variational formulation of the equations. Integrals over # internal facets are indicated by `*dS`. # Bilinear form a = ( inner(div(grad(u)), div(grad(v))) * dx - inner(avg(div(grad(u))), jump(grad(v), n)) * dS - inner(jump(grad(u), n), avg(div(grad(v)))) * dS + alpha / h_avg * inner(jump(grad(u), n), jump(grad(v), n)) * dS ) # Linear form L = inner(f, v) * dx fenics-dolfinx-0.9.0/cpp/demo/biharmonic/main.cpp000066400000000000000000000235751470520666000217230ustar00rootroot00000000000000// # Biharmonic equation // // This demo illustrates how to: // // * Solve a linear partial differential equation // * Use a discontinuous Galerkin method // * Solve a fourth-order differential equation // // ## Equation and problem definition // // ### Strong formulation // // The biharmonic equation is a fourth-order elliptic equation. On the // domain $\Omega \subset \mathbb{R}^{d}$, $1 \le d \le 3$, it reads // // $$ // \nabla^{4} u = f \quad {\rm in} \ \Omega, // $$ // // where $\nabla^{4} \equiv \nabla^{2} \nabla^{2}$ is the biharmonic // operator and $f$ is a prescribed source term. To formulate a complete // boundary value problem, the biharmonic equation must be complemented // by suitable boundary conditions. // // ### Weak formulation // // Multiplying the biharmonic equation by a test function and integrating // by parts twice leads to a problem of second-order derivatives, which would // require $H^{2}$ conforming (roughly $C^{1}$ continuous) basis functions. 
// To solve the biharmonic equation using Lagrange finite element basis // functions, the biharmonic equation can be split into two second-order // equations (see the Mixed Poisson demo for a mixed method for the Poisson // equation), or a variational formulation can be constructed that imposes // weak continuity of normal derivatives between finite element cells. // This demo uses a discontinuous Galerkin approach to impose continuity // of the normal derivative weakly. // // Consider a triangulation $\mathcal{T}$ of the domain $\Omega$, where // the set of interior facets is denoted by $\mathcal{E}_h^{\rm int}$. // Functions evaluated on opposite sides of a facet are indicated by the // subscripts $+$ and $-$. // Using the standard continuous Lagrange finite element space // // $$ // V = \left\{v \in H^{1}_{0}(\Omega)\,:\, v \in P_{k}(K) \ // \forall \ K \in \mathcal{T} \right\} // $$ // // and considering the boundary conditions // // \begin{align} // u &= 0 \quad {\rm on} \ \partial\Omega, \\ // \nabla^{2} u &= 0 \quad {\rm on} \ \partial\Omega, // \end{align} // // a weak formulation of the biharmonic problem reads: find $u \in V$ such that // // $$ // a(u,v)=L(v) \quad \forall \ v \in V, // $$ // // where the bilinear form is // // \begin{align*} // a(u, v) &= // \sum_{K \in \mathcal{T}} \int_{K} \nabla^{2} u \nabla^{2} v \, {\rm d}x \\ // &\qquad+\sum_{E \in \mathcal{E}_h^{\rm int}}\left(\int_{E} \frac{\alpha}{h_E} // [\!\![ \nabla u ]\!\!] [\!\![ \nabla v ]\!\!] \, {\rm d}s // - \int_{E} \left<\nabla^{2} u \right>[\!\![ \nabla v ]\!\!] \, {\rm d}s // - \int_{E} [\!\![ \nabla u ]\!\!] \left<\nabla^{2} v \right> \, // {\rm d}s\right) // \end{align*} // // and the linear form is // // $$ // L(v) = \int_{\Omega} fv \, {\rm d}x. // $$ // // Furthermore, $\left< u \right> = \frac{1}{2} (u_{+} + u_{-})$, // $[\!\![ w ]\!\!] = w_{+} \cdot n_{+} + w_{-} \cdot n_{-}$, // $\alpha \ge 0$ is a penalty parameter and // $h_E$ is a measure of the cell size. 
// // The input parameters for this demo are defined as follows: // // - $\Omega = [0,1] \times [0,1]$ (a unit square) // - $\alpha = 8.0$ (penalty parameter) // - $f = 4.0 \pi^4\sin(\pi x)\sin(\pi y)$ (source term) // // // // ## Implementation // // The implementation is in two files: a form file containing the // definition of the variational forms expressed in UFL and a C++ file // containing the actual solver. // // Running this demo requires the files: {download}`demo_biharmonic/main.cpp`, // {download}`demo_biharmonic/biharmonic.py` and // {download}`demo_biharmonic/CMakeLists.txt`. // // ### UFL form file // // The UFL file is implemented in {download}`demo_biharmonic/biharmonic.py`. // ````{admonition} UFL form implemented in python // :class: dropdown // ![ufl-code] // ```` // // ````{note} // TODO: explanation on how to run cmake and/or shell commands for `ffcx`. // To compile biharmonic.py using FFCx with an option // for PETSc scalar type `float64` one would execute the command // ```bash // ffcx biharmonic.py --scalar_type=float64 // ``` // ```` // // ### C++ program // // The main solver is implemented in the {download}`demo_biharmonic/main.cpp` // file. // // At the top we include the DOLFINx header file and the generated // header file "biharmonic.h" containing the variational forms for the // Biharmonic equation, which are defined in the UFL form file. For // convenience we also include the DOLFINx namespace. #include "biharmonic.h" #include #include #include #include #include #include #include #include #include using namespace dolfinx; using T = PetscScalar; using U = typename dolfinx::scalar_value_type_t; // Inside the `main` function, we begin by defining a mesh of the // domain. As the unit square is a very standard domain, we can use a // built-in mesh provided by the {cpp:class}`UnitSquareMesh` factory. 
In // order to create a mesh consisting of 32 x 32 squares with each square // divided into two triangles, and the finite element space (specified // in the form file) defined relative to this mesh, we do as follows int main(int argc, char* argv[]) { dolfinx::init_logging(argc, argv); PetscInitialize(&argc, &argv, nullptr, nullptr); { // Create mesh auto part = mesh::create_cell_partitioner(mesh::GhostMode::shared_facet); auto mesh = std::make_shared>( mesh::create_rectangle(MPI_COMM_WORLD, {{{0.0, 0.0}, {1.0, 1.0}}}, {32, 32}, mesh::CellType::triangle, part)); // A function space object, which is defined in the generated code, // is created: auto element = basix::create_element( basix::element::family::P, basix::cell::type::triangle, 2, basix::element::lagrange_variant::unset, basix::element::dpc_variant::unset, false); // Create function space auto V = std::make_shared>( fem::create_functionspace(mesh, element)); // The source function $f$ and the penalty term $\alpha$ are // declared: auto f = std::make_shared>(V); f->interpolate( [](auto x) -> std::pair, std::vector> { std::vector f; for (std::size_t p = 0; p < x.extent(1); ++p) { auto pi = std::numbers::pi; f.push_back(4.0 * std::pow(pi, 4) * std::sin(pi * x(0, p)) * std::sin(pi * x(1, p))); } return {f, {f.size()}}; }); auto alpha = std::make_shared>(8.0); // Define variational forms auto a = std::make_shared>(fem::create_form( *form_biharmonic_a, {V, V}, {}, {{"alpha", alpha}}, {}, {})); auto L = std::make_shared>( fem::create_form(*form_biharmonic_L, {V}, {{"f", f}}, {}, {}, {})); // Now, the Dirichlet boundary condition ($u = 0$) can be // created using the class {cpp:class}`DirichletBC`. A // {cpp:class}`DirichletBC` takes two arguments: the value of the // boundary condition, and the part of the boundary on which the // condition applies. 
In our example, the value of the boundary // condition (0.0) can represented using a {cpp:class}`Function`, // and the Dirichlet boundary is defined by the indices of degrees // of freedom to which the boundary condition applies. The // definition of the Dirichlet boundary condition then looks as // follows: // Define boundary condition auto facets = mesh::exterior_facet_indices(*mesh->topology()); const auto bdofs = fem::locate_dofs_topological( *V->mesh()->topology_mutable(), *V->dofmap(), 1, facets); auto bc = std::make_shared>(0.0, bdofs, V); // Now, we have specified the variational forms and can consider // the solution of the variational problem. First, we need to // define a {cpp:class}`Function` `u` to store the solution. (Upon // initialization, it is simply set to the zero function.) Next, we // can call the `solve` function with the arguments `a == L`, `u` // and `bc` as follows: // Compute solution fem::Function u(V); auto A = la::petsc::Matrix(fem::petsc::create_matrix(*a), false); la::Vector b(L->function_spaces()[0]->dofmap()->index_map, L->function_spaces()[0]->dofmap()->index_map_bs()); MatZeroEntries(A.mat()); fem::assemble_matrix(la::petsc::Matrix::set_block_fn(A.mat(), ADD_VALUES), *a, {bc}); MatAssemblyBegin(A.mat(), MAT_FLUSH_ASSEMBLY); MatAssemblyEnd(A.mat(), MAT_FLUSH_ASSEMBLY); fem::set_diagonal(la::petsc::Matrix::set_fn(A.mat(), INSERT_VALUES), *V, {bc}); MatAssemblyBegin(A.mat(), MAT_FINAL_ASSEMBLY); MatAssemblyEnd(A.mat(), MAT_FINAL_ASSEMBLY); b.set(0.0); fem::assemble_vector(b.mutable_array(), *L); fem::apply_lifting(b.mutable_array(), {a}, {{bc}}, {}, T(1.0)); b.scatter_rev(std::plus()); bc->set(b.mutable_array(), std::nullopt); la::petsc::KrylovSolver lu(MPI_COMM_WORLD); la::petsc::options::set("ksp_type", "preonly"); la::petsc::options::set("pc_type", "lu"); lu.set_from_options(); lu.set_operator(A.mat()); la::petsc::Vector _u(la::petsc::create_vector_wrap(*u.x()), false); la::petsc::Vector _b(la::petsc::create_vector_wrap(b), 
false); lu.solve(_u.vec(), _b.vec()); // Update ghost values before output u.x()->scatter_fwd(); // The function `u` will be modified during the call to solve. A // {cpp:class}`Function` can be saved to a file. Here, we output the // solution to a `VTK` file (specified using the suffix `.pvd`) for // visualisation in an external program such as Paraview. // Save solution in VTK format io::VTKFile file(MPI_COMM_WORLD, "u.pvd", "w"); file.write({u}, 0.0); } PetscFinalize(); return 0; } fenics-dolfinx-0.9.0/cpp/demo/codim_0_assembly/000077500000000000000000000000001470520666000213555ustar00rootroot00000000000000fenics-dolfinx-0.9.0/cpp/demo/codim_0_assembly/CMakeLists.txt000066400000000000000000000040701470520666000241160ustar00rootroot00000000000000# This file was generated by running # # python cmake/scripts/generate-cmakefiles from dolfinx/cpp # cmake_minimum_required(VERSION 3.19) set(PROJECT_NAME demo_codim_0_assembly) project(${PROJECT_NAME} LANGUAGES C CXX) # Set C++20 standard set(CMAKE_CXX_STANDARD 20) set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) if(NOT TARGET dolfinx) find_package(DOLFINX REQUIRED) endif() include(CheckSymbolExists) set(CMAKE_REQUIRED_INCLUDES ${PETSC_INCLUDE_DIRS}) check_symbol_exists(PETSC_USE_COMPLEX petscsystypes.h PETSC_SCALAR_COMPLEX) check_symbol_exists(PETSC_USE_REAL_DOUBLE petscsystypes.h PETSC_REAL_DOUBLE) # Add target to compile UFL files if(PETSC_SCALAR_COMPLEX EQUAL 1) if(PETSC_REAL_DOUBLE EQUAL 1) set(SCALAR_TYPE "--scalar_type=complex128") else() set(SCALAR_TYPE "--scalar_type=complex64") endif() else() if(PETSC_REAL_DOUBLE EQUAL 1) set(SCALAR_TYPE "--scalar_type=float64") else() set(SCALAR_TYPE "--scalar_type=float32") endif() endif() add_custom_command( OUTPUT mixed_codim0.c COMMAND ffcx ${CMAKE_CURRENT_SOURCE_DIR}/mixed_codim0.py ${SCALAR_TYPE} VERBATIM DEPENDS mixed_codim0.py COMMENT "Compile mixed_codim0.py using FFCx" ) set(CMAKE_INCLUDE_CURRENT_DIR ON) add_executable(${PROJECT_NAME} main.cpp 
${CMAKE_CURRENT_BINARY_DIR}/mixed_codim0.c) target_link_libraries(${PROJECT_NAME} dolfinx) # Do not throw error for 'multi-line comments' (these are typical in rst which # includes LaTeX) include(CheckCXXCompilerFlag) check_cxx_compiler_flag("-Wno-comment" HAVE_NO_MULTLINE) set_source_files_properties( main.cpp PROPERTIES COMPILE_FLAGS "$<$:-Wno-comment -Wall -Wextra -pedantic -Werror>" ) # Test targets (used by DOLFINx testing system) set(TEST_PARAMETERS2 -np 2 ${MPIEXEC_PARAMS} "./${PROJECT_NAME}") set(TEST_PARAMETERS3 -np 3 ${MPIEXEC_PARAMS} "./${PROJECT_NAME}") add_test(NAME ${PROJECT_NAME}_mpi_2 COMMAND "mpirun" ${TEST_PARAMETERS2}) add_test(NAME ${PROJECT_NAME}_mpi_3 COMMAND "mpirun" ${TEST_PARAMETERS3}) add_test(NAME ${PROJECT_NAME}_serial COMMAND ${PROJECT_NAME}) fenics-dolfinx-0.9.0/cpp/demo/codim_0_assembly/main.cpp000066400000000000000000000147371470520666000230210ustar00rootroot00000000000000// # Mixed assembly with a function mesh on a subset of cells // // This demo illustrates how to: // // * Create a submesh of co-dimension 0 // * Assemble a mixed formulation with function spaces defined on the sub mesh // and parent mesh #include "mixed_codim0.h" #include #include #include #include #include #include #include #include #include using namespace dolfinx; using T = PetscScalar; using U = typename dolfinx::scalar_value_type_t; int main(int argc, char* argv[]) { dolfinx::init_logging(argc, argv); PetscInitialize(&argc, &argv, nullptr, nullptr); { // Create mesh and function space auto part = mesh::create_cell_partitioner(mesh::GhostMode::shared_facet); auto mesh = std::make_shared>( mesh::create_rectangle(MPI_COMM_WORLD, {{{0.0, 0.0}, {2.0, 1.0}}}, {1, 4}, mesh::CellType::quadrilateral, part)); auto element = basix::create_element( basix::element::family::P, basix::cell::type::quadrilateral, 1, basix::element::lagrange_variant::unset, basix::element::dpc_variant::unset, false); auto V = std::make_shared>( fem::create_functionspace(mesh, element, {})); // 
Next we find all cells of the mesh with y<0.5 const int tdim = mesh->topology()->dim(); auto marked_cells = mesh::locate_entities( *mesh, tdim, [](auto x) { using U = typename decltype(x)::value_type; constexpr U eps = 1.0e-8; std::vector marker(x.extent(1), false); for (std::size_t p = 0; p < x.extent(1); ++p) { auto y = x(1, p); if (std::abs(y) <= 0.5 + eps) marker[p] = true; } return marker; }); // We create a MeshTags object where we mark these cells with 2, and any // other cell with 1 auto cell_map = mesh->topology()->index_map(tdim); std::size_t num_cells_local = mesh->topology()->index_map(tdim)->size_local() + mesh->topology()->index_map(tdim)->num_ghosts(); std::vector cells(num_cells_local); std::iota(cells.begin(), cells.end(), 0); std::vector values(num_cells_local, 1); std::for_each(marked_cells.begin(), marked_cells.end(), [&values](auto& c) { values[c] = 2; }); dolfinx::mesh::MeshTags cell_marker(mesh->topology(), tdim, cells, values); std::shared_ptr> submesh; std::vector submesh_to_mesh; { auto [_submesh, _submesh_to_mesh, v_map, g_map] = mesh::create_submesh(*mesh, tdim, cell_marker.find(2)); submesh = std::make_shared>(std::move(_submesh)); submesh_to_mesh = std::move(_submesh_to_mesh); } // We create the function space used for the trial space auto W = std::make_shared>( fem::create_functionspace(submesh, element, {})); // A mixed-domain form has functions defined over different meshes. The mesh // associated with the measure (dx, ds, etc.) is called the integration // domain. To assemble mixed-domain forms, maps must be provided taking // entities in the integration domain to entities on each mesh in the form. // Since one of our forms has a measure defined over `mesh` and involves a // function defined over `submesh`, we must provide a map from entities in // `mesh` to entities in `submesh`. This is simply the "inverse" of // `submesh_to_mesh`. 
std::vector mesh_to_submesh(num_cells_local, -1); for (std::size_t i = 0; i < submesh_to_mesh.size(); ++i) mesh_to_submesh[submesh_to_mesh[i]] = i; std::shared_ptr> const_ptr = submesh; std::map>, std::span> entity_maps = {{const_ptr, std::span(mesh_to_submesh.data(), mesh_to_submesh.size())}}; // Next we compute the integration entities on the integration domain `mesh` std::map< fem::IntegralType, std::vector>>> subdomain_map = {}; auto integration_entities = fem::compute_integration_domains( fem::IntegralType::cell, *mesh->topology(), cell_marker.find(2), tdim); subdomain_map[fem::IntegralType::cell].push_back( {3, std::span(integration_entities.data(), integration_entities.size())}); // We can now create the bi-linear form auto a_mixed = std::make_shared>( fem::create_form(*form_mixed_codim0_a_mixed, {V, W}, {}, {}, subdomain_map, entity_maps, V->mesh())); la::SparsityPattern sp_mixed = fem::create_sparsity_pattern(*a_mixed); sp_mixed.finalize(); la::MatrixCSR A_mixed(sp_mixed); fem::assemble_matrix(A_mixed.mat_add_values(), *a_mixed, {}); A_mixed.scatter_rev(); auto a = std::make_shared>( fem::create_form(*form_mixed_codim0_a, {W, W}, {}, {}, {}, {})); la::SparsityPattern sp = fem::create_sparsity_pattern(*a); sp.finalize(); la::MatrixCSR A(sp); fem::assemble_matrix(A.mat_add_values(), *a, {}); A.scatter_rev(); std::vector A_mixed_flattened = A_mixed.to_dense(); std::stringstream cc; cc.precision(3); cc << "A_mixed:" << std::endl; std::size_t num_owned_rows = V->dofmap()->index_map->size_local(); std::size_t num_sub_cols = W->dofmap()->index_map->size_local() + W->dofmap()->index_map->num_ghosts(); for (std::size_t i = 0; i < num_owned_rows; i++) { for (std::size_t j = 0; j < num_sub_cols; j++) { cc << A_mixed_flattened[i * num_sub_cols + j] << " "; } cc << std::endl; } std::size_t num_owned_sub_rows = W->dofmap()->index_map->size_local(); std::vector A_flattened = A.to_dense(); cc << "A" << std::endl; for (std::size_t i = 0; i < num_owned_sub_rows; i++) { for 
(std::size_t j = 0; j < num_sub_cols; j++) { cc << A_flattened[i * num_sub_cols + j] << " "; } cc << std::endl; } std::cout << cc.str() << std::endl; } PetscFinalize(); return 0; } fenics-dolfinx-0.9.0/cpp/demo/codim_0_assembly/mixed_codim0.py000066400000000000000000000021011470520666000242620ustar00rootroot00000000000000# This demo aims to illustrate how to assemble a matrix with a trial function # defined on a submesh of co-dimension 0, and a test function defined on the parent mesh from basix.ufl import element from ufl import ( FunctionSpace, Mesh, TestFunction, TrialFunction, dx, inner, ) cell = "quadrilateral" coord_element = element("Lagrange", cell, 1, shape=(2,)) mesh = Mesh(coord_element) # We define the function space and test function on the full mesh e = element("Lagrange", cell, 1) V = FunctionSpace(mesh, e) v = TestFunction(V) # Next we define the sub-mesh submesh = Mesh(coord_element) W = FunctionSpace(submesh, e) p = TrialFunction(W) # And finally we define a "mass matrix" on the submesh, with the test function # of the parent mesh. 
The integration domain is the parent mesh, but we restrict integration # to all cells marked with subdomain_id=3, which will indicate what cells of our mesh is part # of the submesh a_mixed = inner(p, v) * dx(domain=mesh, subdomain_id=3) q = TestFunction(W) a = inner(p, q) * dx(domain=submesh) forms = [a_mixed, a] fenics-dolfinx-0.9.0/cpp/demo/custom_kernel/000077500000000000000000000000001470520666000210165ustar00rootroot00000000000000fenics-dolfinx-0.9.0/cpp/demo/custom_kernel/CMakeLists.txt000066400000000000000000000023471470520666000235640ustar00rootroot00000000000000# This file was generated by running # # python cmake/scripts/generate-cmakefiles.py from dolfinx/cpp # cmake_minimum_required(VERSION 3.19) set(PROJECT_NAME demo_custom_kernel) project(${PROJECT_NAME} LANGUAGES C CXX) # Set C++20 standard set(CMAKE_CXX_STANDARD 20) set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) if(NOT TARGET dolfinx) find_package(DOLFINX REQUIRED) endif() set(CMAKE_INCLUDE_CURRENT_DIR ON) add_executable(${PROJECT_NAME} main.cpp) target_link_libraries(${PROJECT_NAME} dolfinx) # Do not throw error for 'multi-line comments' (these are typical in rst which # includes LaTeX) include(CheckCXXCompilerFlag) check_cxx_compiler_flag("-Wno-comment" HAVE_NO_MULTLINE) set_source_files_properties( main.cpp PROPERTIES COMPILE_FLAGS "$<$:-Wno-comment -Wall -Wextra -pedantic -Werror>" ) # Test targets (used by DOLFINx testing system) set(TEST_PARAMETERS2 -np 2 ${MPIEXEC_PARAMS} "./${PROJECT_NAME}") set(TEST_PARAMETERS3 -np 3 ${MPIEXEC_PARAMS} "./${PROJECT_NAME}") add_test(NAME ${PROJECT_NAME}_mpi_2 COMMAND "mpirun" ${TEST_PARAMETERS2}) add_test(NAME ${PROJECT_NAME}_mpi_3 COMMAND "mpirun" ${TEST_PARAMETERS3}) add_test(NAME ${PROJECT_NAME}_serial COMMAND ${PROJECT_NAME}) fenics-dolfinx-0.9.0/cpp/demo/custom_kernel/main.cpp000066400000000000000000000246231470520666000224550ustar00rootroot00000000000000// ```text // Copyright (C) 2024 Jack S. Hale and Garth N. 
Wells // This file is part of DOLFINx (https://www.fenicsproject.org) // SPDX-License-Identifier: LGPL-3.0-or-later // ``` // # Custom cell kernel assembly // // This demo shows various methods to define custom cell kernels in C++ // and have them assembled into DOLFINx linear algebra data structures. #include #include #include #include #include #include #include #include #include #include #include #include using namespace dolfinx; template using mdspand_t = MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< T, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents>; template using mdspan2_t = MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan>; /// @brief Compute the P1 element mass matrix on the reference cell. /// @tparam T Scalar type. /// @param phi Basis functions. /// @param w Integration weights. /// @return Element reference matrix (row-major storage). template std::array A_ref(mdspand_t phi, std::span w) { std::array A_b{}; mdspan2_t A(A_b.data()); for (std::size_t k = 0; k < phi.extent(1); ++k) // quadrature point for (std::size_t i = 0; i < A.extent(0); ++i) // row i for (std::size_t j = 0; j < A.extent(1); ++j) // column j A(i, j) += w[k] * phi(0, k, i, 0) * phi(0, k, j, 0); return A_b; } /// @brief Compute the P1 RHS vector for f=1 on the reference cell. /// @tparam T Scalar type. /// @param phi Basis functions. /// @param w Integration weights. /// @return RHS reference vector. template std::array b_ref(mdspand_t phi, std::span w) { std::array b{}; for (std::size_t k = 0; k < phi.extent(1); ++k) // quadrature point for (std::size_t i = 0; i < b.size(); ++i) // row i b[i] += w[k] * phi(0, k, i, 0); return b; } /// @brief Assemble a matrix operator using a `std::function` kernel /// function. /// @tparam T Scalar type. /// @param V Function space. /// @param kernel Element kernel to execute. /// @param cells Cells to execute the kernel over. /// @return Frobenius norm squared of the matrix. 
template double assemble_matrix0(std::shared_ptr> V, auto kernel, std::span cells) { // Kernel data (ID, kernel function, cell indices to execute over) std::vector kernel_data{ fem::integral_data(-1, kernel, cells, std::vector{})}; // Associate kernel with cells (as opposed to facets, etc) std::map integrals{std::pair{fem::IntegralType::cell, kernel_data}}; fem::Form a({V, V}, integrals, {}, {}, false, {}, V->mesh()); auto dofmap = V->dofmap(); auto sp = la::SparsityPattern( V->mesh()->comm(), {dofmap->index_map, dofmap->index_map}, {dofmap->index_map_bs(), dofmap->index_map_bs()}); fem::sparsitybuild::cells(sp, {cells, cells}, {*dofmap, *dofmap}); sp.finalize(); la::MatrixCSR A(sp); common::Timer timer("Assembler0 std::function (matrix)"); assemble_matrix(A.mat_add_values(), a, {}); A.scatter_rev(); return A.squared_norm(); } /// @brief Assemble a RHS vector using a `std::function` kernel /// function. /// @tparam T Scalar type. /// @param V Function space. /// @param kernel Element kernel to execute. /// @param cells Cells to execute the kernel over. /// @return l2 norm squared of the vector. template double assemble_vector0(std::shared_ptr> V, auto kernel, std::span cells) { auto mesh = V->mesh(); std::vector kernal_data{ fem::integral_data(-1, kernel, cells, std::vector{})}; std::map integrals{std::pair{fem::IntegralType::cell, kernal_data}}; fem::Form L({V}, integrals, {}, {}, false, {}, mesh); auto dofmap = V->dofmap(); la::Vector b(dofmap->index_map, 1); common::Timer timer("Assembler0 std::function (vector)"); fem::assemble_vector(b.mutable_array(), L); b.scatter_rev(std::plus()); return la::squared_norm(b); } /// @brief Assemble a matrix operator using a lambda kernel function. /// /// The lambda function can be inlined in the assembly code, which can /// be important for performance for lightweight kernels. /// /// @tparam T Scalar type. /// @param g mesh geometry. /// @param dofmap dofmap. /// @param kernel Element kernel to execute. 
/// @param cells Cells to execute the kernel over. /// @return Frobenius norm squared of the matrix. template double assemble_matrix1(const mesh::Geometry& g, const fem::DofMap& dofmap, auto kernel, std::span cells) { auto sp = la::SparsityPattern(dofmap.index_map->comm(), {dofmap.index_map, dofmap.index_map}, {dofmap.index_map_bs(), dofmap.index_map_bs()}); fem::sparsitybuild::cells(sp, {cells, cells}, {dofmap, dofmap}); sp.finalize(); la::MatrixCSR A(sp); auto ident = [](auto, auto, auto, auto) {}; // DOF permutation not required common::Timer timer("Assembler1 lambda (matrix)"); fem::impl::assemble_cells(A.mat_add_values(), g.dofmap(), g.x(), cells, {dofmap.map(), 1, cells}, ident, {dofmap.map(), 1, cells}, ident, {}, {}, kernel, std::span(), 0, {}, {}, {}); A.scatter_rev(); return A.squared_norm(); } /// @brief Assemble a RHS vector using using a lambda kernel function. /// /// The lambda function can be inlined in the assembly code, which can /// be important for performance for lightweight kernels. /// /// @tparam T Scalar type. /// @param g mesh geometry. /// @param dofmap dofmap. /// @param kernel Element kernel to execute. /// @param cells Cells to execute the kernel over. /// @return l2 norm squared of the vector. template double assemble_vector1(const mesh::Geometry& g, const fem::DofMap& dofmap, auto kernel, const std::vector& cells) { la::Vector b(dofmap.index_map, 1); common::Timer timer("Assembler1 lambda (vector)"); fem::impl::assemble_cells( [](auto, auto, auto, auto) {}, b.mutable_array(), g.dofmap(), g.x(), cells, {dofmap.map(), 1, cells}, kernel, {}, {}, 0, {}); b.scatter_rev(std::plus()); return la::squared_norm(b); } /// @brief Assemble P1 mass matrix and a RHS vector using element kernel /// approaches. /// /// Function demonstrates how hand-coded element kernels can be executed /// in assembly over cells. /// /// @tparam T Scalar type. /// @param comm MPI communicator to assembler over. 
template void assemble(MPI_Comm comm) { // Create mesh auto mesh = std::make_shared>(mesh::create_rectangle( comm, {{{0, 0}, {1, 1}}}, {516, 116}, mesh::CellType::triangle)); // Create Basix P1 Lagrange element. This will be used to construct // basis functions inside the custom cell kernel. constexpr int order = 1; basix::FiniteElement e = basix::create_element( basix::element::family::P, mesh::cell_type_to_basix_type(mesh::CellType::triangle), order, basix::element::lagrange_variant::unset, basix::element::dpc_variant::unset, false); // Construct quadrature rule constexpr int max_degree = 2 * order; auto quadrature_type = basix::quadrature::get_default_rule( basix::cell::type::triangle, max_degree); auto [X_b, weights] = basix::quadrature::make_quadrature( quadrature_type, basix::cell::type::triangle, basix::polyset::type::standard, max_degree); mdspand_t X(X_b.data(), weights.size(), 2); // Create a scalar function space auto V = std::make_shared>( fem::create_functionspace(mesh, e)); // Build list of cells to assembler over (all cells owned by this // rank) std::int32_t size_local = mesh->topology()->index_map(mesh->topology()->dim())->size_local(); std::vector cells(size_local); std::iota(cells.begin(), cells.end(), 0); // Tabulate basis functions at quadrature points auto e_shape = e.tabulate_shape(0, weights.size()); std::size_t length = std::accumulate(e_shape.begin(), e_shape.end(), 1, std::multiplies<>{}); std::vector phi_b(length); mdspand_t phi(phi_b.data(), e_shape); e.tabulate(0, X, phi); // Utility function to compute det(J) for an affine triangle cell // (geometry is 3D) auto detJ = [](mdspan2_t x) { return std::abs((x(0, 0) - x(1, 0)) * (x(2, 1) - x(1, 1)) - (x(0, 1) - x(1, 1)) * (x(2, 0) - x(1, 0))); }; // Finite element mass matrix kernel function std::array A_hat_b = A_ref(phi, weights); auto kernel_a = [A_hat = mdspan2_t(A_hat_b.data()), detJ](T* A, const T*, const T*, const T* x, const int*, const uint8_t*) { T scale = detJ(mdspan2_t(x)); 
mdspan2_t _A(A); for (std::size_t i = 0; i < A_hat.extent(0); ++i) for (std::size_t j = 0; j < A_hat.extent(1); ++j) _A(i, j) = scale * A_hat(i, j); }; // Finite element RHS (f=1) kernel function auto kernel_L = [b_hat = b_ref(phi, weights), detJ](T* b, const T*, const T*, const T* x, const int*, const uint8_t*) { T scale = detJ(mdspan2_t(x)); for (std::size_t i = 0; i < 3; ++i) b[i] = scale * b_hat[i]; }; // Assemble matrix and vector using std::function kernel assemble_matrix0(V, kernel_a, cells); assemble_vector0(V, kernel_L, cells); // Assemble matrix and vector using lambda kernel. This version // supports efficient inlining of the kernel in the assembler. This // can give a significant performance improvement for lightweight // kernels. assemble_matrix1(mesh->geometry(), *V->dofmap(), kernel_a, cells); assemble_vector1(mesh->geometry(), *V->dofmap(), kernel_L, cells); list_timings(comm, {TimingType::wall}); } int main(int argc, char* argv[]) { MPI_Init(&argc, &argv); dolfinx::init_logging(argc, argv); assemble(MPI_COMM_WORLD); assemble(MPI_COMM_WORLD); MPI_Finalize(); return 0; } fenics-dolfinx-0.9.0/cpp/demo/hyperelasticity/000077500000000000000000000000001470520666000213665ustar00rootroot00000000000000fenics-dolfinx-0.9.0/cpp/demo/hyperelasticity/CMakeLists.txt000066400000000000000000000040531470520666000241300ustar00rootroot00000000000000# This file was generated by running # # python cmake/scripts/generate-cmakefiles from dolfinx/cpp # cmake_minimum_required(VERSION 3.19) set(PROJECT_NAME demo_hyperelasticity) project(${PROJECT_NAME} LANGUAGES C CXX) # Set C++20 standard set(CMAKE_CXX_STANDARD 20) set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) if(NOT TARGET dolfinx) find_package(DOLFINX REQUIRED) endif() include(CheckSymbolExists) set(CMAKE_REQUIRED_INCLUDES ${PETSC_INCLUDE_DIRS}) check_symbol_exists(PETSC_USE_COMPLEX petscsystypes.h PETSC_SCALAR_COMPLEX) if(PETSC_SCALAR_COMPLEX EQUAL 1) message(STATUS "** This demo does not support 
complex mode") else() check_symbol_exists(PETSC_USE_REAL_DOUBLE petscsystypes.h PETSC_REAL_DOUBLE) if(PETSC_REAL_DOUBLE EQUAL 1) set(SCALAR_TYPE "--scalar_type=float64") else() set(SCALAR_TYPE "--scalar_type=float32") endif() # Add target to compile UFL files add_custom_command( OUTPUT hyperelasticity.c COMMAND ffcx ${CMAKE_CURRENT_SOURCE_DIR}/hyperelasticity.py ${SCALAR_TYPE} VERBATIM DEPENDS hyperelasticity.py COMMENT "Compile hyperelasticity.py using FFCx" ) set(CMAKE_INCLUDE_CURRENT_DIR ON) add_executable(${PROJECT_NAME} main.cpp ${CMAKE_CURRENT_BINARY_DIR}/hyperelasticity.c) target_link_libraries(${PROJECT_NAME} dolfinx) # Do not throw error for 'multi-line comments' (these are typical in rst which # includes LaTeX) include(CheckCXXCompilerFlag) check_cxx_compiler_flag("-Wno-comment" HAVE_NO_MULTLINE) set_source_files_properties( main.cpp PROPERTIES COMPILE_FLAGS "$<$:-Wno-comment -Wall -Wextra -pedantic -Werror>" ) # Test targets (used by DOLFINx testing system) set(TEST_PARAMETERS2 -np 2 ${MPIEXEC_PARAMS} "./${PROJECT_NAME}") set(TEST_PARAMETERS3 -np 3 ${MPIEXEC_PARAMS} "./${PROJECT_NAME}") add_test(NAME ${PROJECT_NAME}_mpi_2 COMMAND "mpirun" ${TEST_PARAMETERS2}) add_test(NAME ${PROJECT_NAME}_mpi_3 COMMAND "mpirun" ${TEST_PARAMETERS3}) add_test(NAME ${PROJECT_NAME}_serial COMMAND ${PROJECT_NAME}) endif() fenics-dolfinx-0.9.0/cpp/demo/hyperelasticity/hyperelasticity.py000066400000000000000000000045301470520666000251640ustar00rootroot00000000000000# The first step is to define the variational problem at hand. 
# # We are interested in solving for a discrete vector field in three # dimensions, so first we need the appropriate finite element space and # trial and test functions on this space: from basix.ufl import element from ufl import ( Coefficient, Constant, FunctionSpace, Identity, Mesh, TestFunction, TrialFunction, derivative, det, diff, ds, dx, grad, inner, ln, tr, variable, ) # Function spaces e = element("Lagrange", "tetrahedron", 1, shape=(3,)) mesh = Mesh(e) V = FunctionSpace(mesh, e) # Trial and test functions du = TrialFunction(V) # Incremental displacement v = TestFunction(V) # Test function # Note that `element` with `shape=(3,)` creates a finite element space # of vector fields. # # Next, we will be needing functions for the boundary source `B`, the # traction `T` and the displacement solution itself `u`: # Functions u = Coefficient(V) # Displacement from previous iteration B = Constant(mesh, shape=(3,)) # Body force per unit volume T = Constant(mesh, shape=(3,)) # Traction force on the boundary # Now, we can define the kinematic quantities involved in the model: # Kinematics d = len(u) I = Identity(d) # Identity tensor # noqa: E741 F = variable(I + grad(u)) # Deformation gradient C = F.T * F # Right Cauchy-Green tensor # Invariants of deformation tensors Ic = tr(C) J = det(F) # Before defining the energy density and thus the total potential # energy, it only remains to specify constants for the elasticity # parameters: # Elasticity parameters E = 10.0 nu = 0.3 mu = E / (2 * (1 + nu)) lmbda = E * nu / ((1 + nu) * (1 - 2 * nu)) # Both the first variation of the potential energy, and the Jacobian of # the variation, can be automatically computed by a call to # `derivative`: # Stored strain energy density (compressible neo-Hookean model) psi = (mu / 2) * (Ic - 3) - mu * ln(J) + (lmbda / 2) * (ln(J)) ** 2 # Total potential energy Pi = psi * dx - inner(B, u) * dx - inner(T, u) * ds # First variation of Pi (directional derivative about u in the direction # of v) 
F_form = derivative(Pi, u, v) # Compute Jacobian of F J_form = derivative(F_form, u, du) # Compute Cauchy stress sigma = (1 / J) * diff(psi, F) * F.T forms = [F_form, J_form] elements = [e] expressions = [(sigma, [[0.25, 0.25, 0.25]])] fenics-dolfinx-0.9.0/cpp/demo/hyperelasticity/main.cpp000066400000000000000000000234301470520666000230200ustar00rootroot00000000000000// # Hyperelasticity // // Solve a compressible neo-Hookean model in 3D. // ## UFL form file // // The UFL file is implemented in // {download}`demo_hyperelasticity/hyperelasticity.py`. // ````{admonition} UFL form implemented in python // :class: dropdown // ![ufl-code] // ```` // // ## C++ program #include "hyperelasticity.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include using namespace dolfinx; using T = PetscScalar; using U = typename dolfinx::scalar_value_type_t; /// Hyperelastic problem class class HyperElasticProblem { public: /// Constructor HyperElasticProblem( std::shared_ptr> L, std::shared_ptr> J, std::vector>> bcs) : _l(L), _j(J), _bcs(bcs), _b(L->function_spaces()[0]->dofmap()->index_map, L->function_spaces()[0]->dofmap()->index_map_bs()), _matA(la::petsc::Matrix(fem::petsc::create_matrix(*J, "aij"), false)) { auto map = L->function_spaces()[0]->dofmap()->index_map; const int bs = L->function_spaces()[0]->dofmap()->index_map_bs(); std::int32_t size_local = bs * map->size_local(); std::vector ghosts(map->ghosts().begin(), map->ghosts().end()); std::int64_t size_global = bs * map->size_global(); VecCreateGhostBlockWithArray(map->comm(), bs, size_local, size_global, ghosts.size(), ghosts.data(), _b.array().data(), &_b_petsc); } /// Destructor virtual ~HyperElasticProblem() { if (_b_petsc) VecDestroy(&_b_petsc); } /// @brief Form /// @return auto form() { return [](Vec x) { VecGhostUpdateBegin(x, INSERT_VALUES, SCATTER_FORWARD); VecGhostUpdateEnd(x, INSERT_VALUES, 
SCATTER_FORWARD); }; } /// Compute F at current point x auto F() { return [&](const Vec x, Vec) { // Assemble b and update ghosts std::span b(_b.mutable_array()); std::ranges::fill(b, 0); fem::assemble_vector(b, *_l); VecGhostUpdateBegin(_b_petsc, ADD_VALUES, SCATTER_REVERSE); VecGhostUpdateEnd(_b_petsc, ADD_VALUES, SCATTER_REVERSE); // Set bcs Vec x_local; VecGhostGetLocalForm(x, &x_local); PetscInt n = 0; VecGetSize(x_local, &n); const T* _x = nullptr; VecGetArrayRead(x_local, &_x); std::ranges::for_each(_bcs, [b, x = std::span(_x, n)](auto& bc) { bc->set(b, x, -1); }); VecRestoreArrayRead(x_local, &_x); }; } /// Compute J = F' at current point x auto J() { return [&](const Vec, Mat A) { MatZeroEntries(A); fem::assemble_matrix(la::petsc::Matrix::set_block_fn(A, ADD_VALUES), *_j, _bcs); MatAssemblyBegin(A, MAT_FLUSH_ASSEMBLY); MatAssemblyEnd(A, MAT_FLUSH_ASSEMBLY); fem::set_diagonal(la::petsc::Matrix::set_fn(A, INSERT_VALUES), *_j->function_spaces()[0], _bcs); MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY); MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY); }; } /// RHS vector Vec vector() { return _b_petsc; } /// Jacobian matrix Mat matrix() { return _matA.mat(); } private: std::shared_ptr> _l, _j; std::vector>> _bcs; la::Vector _b; Vec _b_petsc = nullptr; la::petsc::Matrix _matA; }; int main(int argc, char* argv[]) { init_logging(argc, argv); PetscInitialize(&argc, &argv, nullptr, nullptr); // Set the logging thread name to show the process rank int mpi_rank; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); std::string fmt = "[%Y-%m-%d %H:%M:%S.%e] [RANK " + std::to_string(mpi_rank) + "] [%l] %v"; spdlog::set_pattern(fmt); { // Inside the `main` function, we begin by defining a tetrahedral // mesh of the domain and the function space on this mesh. Here, we // choose to create a unit cube mesh with 25 ( = 24 + 1) vertices in // one direction and 17 ( = 16 + 1) vertices in the other two // directions. 
With this mesh, we initialize the (finite element) // function space defined by the generated code. // Create mesh and define function space auto mesh = std::make_shared>(mesh::create_box( MPI_COMM_WORLD, {{{0.0, 0.0, 0.0}, {1.0, 1.0, 1.0}}}, {10, 10, 10}, mesh::CellType::tetrahedron, mesh::create_cell_partitioner(mesh::GhostMode::none))); auto element = basix::create_element( basix::element::family::P, basix::cell::type::tetrahedron, 1, basix::element::lagrange_variant::unset, basix::element::dpc_variant::unset, false); auto V = std::make_shared>( fem::create_functionspace(mesh, element, {3})); auto B = std::make_shared>(std::vector{0, 0, 0}); auto traction = std::make_shared>(std::vector{0, 0, 0}); // Define solution function auto u = std::make_shared>(V); auto a = std::make_shared>( fem::create_form(*form_hyperelasticity_J_form, {V, V}, {{"u", u}}, {{"B", B}, {"T", traction}}, {}, {})); auto L = std::make_shared>( fem::create_form(*form_hyperelasticity_F_form, {V}, {{"u", u}}, {{"B", B}, {"T", traction}}, {}, {})); auto u_rotation = std::make_shared>(V); u_rotation->interpolate( [](auto x) -> std::pair, std::vector> { constexpr U scale = 0.005; // Center of rotation constexpr U x1_c = 0.5; constexpr U x2_c = 0.5; // Large angle of rotation (60 degrees) constexpr U theta = 1.04719755; // New coordinates std::vector fdata(3 * x.extent(1), 0.0); MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< U, MDSPAN_IMPL_STANDARD_NAMESPACE::extents< std::size_t, 3, MDSPAN_IMPL_STANDARD_NAMESPACE::dynamic_extent>> f(fdata.data(), 3, x.extent(1)); for (std::size_t p = 0; p < x.extent(1); ++p) { U x1 = x(1, p); U x2 = x(2, p); f(1, p) = scale * (x1_c + (x1 - x1_c) * std::cos(theta) - (x2 - x2_c) * std::sin(theta) - x1); f(2, p) = scale * (x2_c + (x1 - x1_c) * std::sin(theta) - (x2 - x2_c) * std::cos(theta) - x2); } return {std::move(fdata), {3, x.extent(1)}}; }); // Create Dirichlet boundary conditions auto bdofs_left = fem::locate_dofs_geometrical( *V, [](auto x) { constexpr U eps = 
1.0e-6; std::vector marker(x.extent(1), false); for (std::size_t p = 0; p < x.extent(1); ++p) { if (std::abs(x(0, p)) < eps) marker[p] = true; } return marker; }); auto bdofs_right = fem::locate_dofs_geometrical( *V, [](auto x) { constexpr U eps = 1.0e-6; std::vector marker(x.extent(1), false); for (std::size_t p = 0; p < x.extent(1); ++p) { if (std::abs(x(0, p) - 1) < eps) marker[p] = true; } return marker; }); std::vector bcs = { std::make_shared>(std::vector{0, 0, 0}, bdofs_left, V), std::make_shared>(u_rotation, bdofs_right)}; HyperElasticProblem problem(L, a, bcs); nls::petsc::NewtonSolver newton_solver(mesh->comm()); newton_solver.setF(problem.F(), problem.vector()); newton_solver.setJ(problem.J(), problem.matrix()); newton_solver.set_form(problem.form()); newton_solver.rtol = 10 * std::numeric_limits::epsilon(); newton_solver.atol = 10 * std::numeric_limits::epsilon(); la::petsc::Vector _u(la::petsc::create_vector_wrap(*u->x()), false); auto [niter, success] = newton_solver.solve(_u.vec()); std::cout << "Number of Newton iterations: " << niter << std::endl; // Compute Cauchy stress. Construct appropriate Basix element for // stress. 
constexpr auto family = basix::element::family::P; auto cell_type = mesh::cell_type_to_basix_type(mesh->topology()->cell_type()); constexpr int k = 0; constexpr bool discontinuous = true; basix::FiniteElement S_element = basix::create_element( family, cell_type, k, basix::element::lagrange_variant::unset, basix::element::dpc_variant::unset, discontinuous); auto S = std::make_shared>(fem::create_functionspace( mesh, S_element, std::vector{3, 3})); auto sigma_expression = fem::create_expression( *expression_hyperelasticity_sigma, {{"u", u}}, {}); auto sigma = fem::Function(S); sigma.name = "cauchy_stress"; sigma.interpolate(sigma_expression); // Save solution in VTK format io::VTKFile file_u(mesh->comm(), "u.pvd", "w"); file_u.write({*u}, 0.0); // Save Cauchy stress in XDMF format io::XDMFFile file_sigma(mesh->comm(), "sigma.xdmf", "w"); file_sigma.write_mesh(*mesh); file_sigma.write_function(sigma, 0.0); } PetscFinalize(); return 0; } fenics-dolfinx-0.9.0/cpp/demo/interpolation-io/000077500000000000000000000000001470520666000214405ustar00rootroot00000000000000fenics-dolfinx-0.9.0/cpp/demo/interpolation-io/CMakeLists.txt000066400000000000000000000023521470520666000242020ustar00rootroot00000000000000# This file was generated by running # # python cmake/scripts/generate-cmakefiles.py from dolfinx/cpp # cmake_minimum_required(VERSION 3.19) set(PROJECT_NAME demo_interpolation-io) project(${PROJECT_NAME} LANGUAGES C CXX) # Set C++20 standard set(CMAKE_CXX_STANDARD 20) set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) if(NOT TARGET dolfinx) find_package(DOLFINX REQUIRED) endif() set(CMAKE_INCLUDE_CURRENT_DIR ON) add_executable(${PROJECT_NAME} main.cpp) target_link_libraries(${PROJECT_NAME} dolfinx) # Do not throw error for 'multi-line comments' (these are typical in rst which # includes LaTeX) include(CheckCXXCompilerFlag) check_cxx_compiler_flag("-Wno-comment" HAVE_NO_MULTLINE) set_source_files_properties( main.cpp PROPERTIES COMPILE_FLAGS 
"$<$:-Wno-comment -Wall -Wextra -pedantic -Werror>" ) # Test targets (used by DOLFINx testing system) set(TEST_PARAMETERS2 -np 2 ${MPIEXEC_PARAMS} "./${PROJECT_NAME}") set(TEST_PARAMETERS3 -np 3 ${MPIEXEC_PARAMS} "./${PROJECT_NAME}") add_test(NAME ${PROJECT_NAME}_mpi_2 COMMAND "mpirun" ${TEST_PARAMETERS2}) add_test(NAME ${PROJECT_NAME}_mpi_3 COMMAND "mpirun" ${TEST_PARAMETERS3}) add_test(NAME ${PROJECT_NAME}_serial COMMAND ${PROJECT_NAME}) fenics-dolfinx-0.9.0/cpp/demo/interpolation-io/main.cpp000066400000000000000000000221231470520666000230700ustar00rootroot00000000000000// ```text // Copyright (C) 2022-2023 Garth N. Wells // This file is part of DOLFINx (https://www.fenicsproject.org) // SPDX-License-Identifier: LGPL-3.0-or-later // ``` // # Interpolation and IO #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include using namespace dolfinx; /// @brief Interpolate a function into a Lagrange finite element space /// and outputs the finite element function to a VTX file for /// visualisation. /// /// @tparam T Scalar type of the finite element function. /// @tparam U Float type for the finite element basis and the mesh. /// @param mesh Mesh. /// @param filename Output filename. File output requires DOLFINX to be /// configured with ADIOS2. 
template void interpolate_scalar(std::shared_ptr> mesh, [[maybe_unused]] std::filesystem::path filename) { // Create a Basix continuous Lagrange element of degree 1 basix::FiniteElement e = basix::create_element( basix::element::family::P, mesh::cell_type_to_basix_type(mesh::CellType::triangle), 1, basix::element::lagrange_variant::unset, basix::element::dpc_variant::unset, false); // Create a scalar function space auto V = std::make_shared>( fem::create_functionspace(mesh, e)); // Create a finite element Function auto u = std::make_shared>(V); // Interpolate sin(2 \pi x[0]) in the scalar Lagrange finite element // space u->interpolate( [](auto x) -> std::pair, std::vector> { std::vector f(x.extent(1)); for (std::size_t p = 0; p < x.extent(1); ++p) f[p] = std::sin(2 * std::numbers::pi * x(0, p)); return {f, {f.size()}}; }); #ifdef HAS_ADIOS2 // Write the function to a VTX file for visualisation, e.g. using // ParaView io::VTXWriter outfile(mesh->comm(), filename.replace_extension("bp"), {u}, "BP4"); outfile.write(0.0); outfile.close(); #endif } /// @brief Interpolate a function into a H(curl) finite element space. /// /// To visualise the function, the H(curl) finite element function is /// interpolated in a discontinuous Lagrange space, which is written to /// a VTX file for visualisation. This allows exact visualisation of a /// function in H(curl). /// /// @tparam T Scalar type of the finite element function. /// @tparam U Float type for the finite element basis and the mesh. /// @param mesh Mesh. /// @param filename Output filename. File output requires DOLFINX to be /// configured with ADIOS2. 
template void interpolate_nedelec(std::shared_ptr> mesh, [[maybe_unused]] std::filesystem::path filename) { // Create a Basix Nedelec (first kind) element of degree 2 (dim=6 on // triangle) basix::FiniteElement e = basix::create_element( basix::element::family::N1E, mesh::cell_type_to_basix_type(mesh::CellType::triangle), 2, basix::element::lagrange_variant::legendre, basix::element::dpc_variant::unset, false); // Create a Nedelec function space auto V = std::make_shared>( fem::create_functionspace(mesh, e)); // Create a Nedelec finite element Function auto u = std::make_shared>(V); // Interpolate the vector field // u = [x[0], x[1]] if x[0] < 0.5 // [x[0] + 1, x[1]] if x[0] >= 0.5 // in the Nedelec space. // // Note that the x1 component of this field is continuous, and the x0 // component is discontinuous across x0 = 0.5. This function lies in // the Nedelec space when there are cell edges aligned to x0 = 0.5. // Find cells with all vertices satisfying (0) x0 <= 0.5 and (1) x0 >= 0.5 auto cells0 = mesh::locate_entities(*mesh, 2, [](auto x) { std::vector marked; for (std::size_t i = 0; i < x.extent(1); ++i) marked.push_back(x(0, i) <= 0.5); return marked; }); auto cells1 = mesh::locate_entities(*mesh, 2, [](auto x) { std::vector marked; for (std::size_t i = 0; i < x.extent(1); ++i) marked.push_back(x(0, i) >= 0.5); return marked; }); // Interpolation on the two sets of cells u->interpolate( [](auto x) -> std::pair, std::vector> { std::vector f(2 * x.extent(1), 0.0); std::copy_n(x.data_handle(), f.size(), f.begin()); return {f, {2, x.extent(1)}}; }, cells0); u->interpolate( [](auto x) -> std::pair, std::vector> { std::vector f(2 * x.extent(1), 0.0); std::copy_n(x.data_handle(), f.size(), f.begin()); std::ranges::transform(f, f.begin(), [](auto x) { return x + T(1); }); return {f, {2, x.extent(1)}}; }, cells1); // Nedelec spaces are not generally supported by visualisation tools. 
// Simply evaluating a Nedelec function at cell vertices can // mis-represent the function. However, we can represented a Nedelec // function exactly in a discontinuous Lagrange space which we can // then visualise. We do this here. // First create a degree 2 vector-valued discontinuous Lagrange space // (which contains the N2 space): basix::FiniteElement e_l = basix::create_element( basix::element::family::P, mesh::cell_type_to_basix_type(mesh::CellType::triangle), 2, basix::element::lagrange_variant::unset, basix::element::dpc_variant::unset, true); // Create a function space auto V_l = std::make_shared>( fem::create_functionspace(mesh, e_l, std::vector{2})); auto u_l = std::make_shared>(V_l); // Interpolate the Nedelec function into the discontinuous Lagrange // space: u_l->interpolate(*u); // Output the discontinuous Lagrange space in VTX format. When plotting // the x0 component the field will appear discontinuous at x0 = 0.5 // (jump in the normal component between cells) and the x1 component // will appear continuous (continuous tangent component between cells). #ifdef HAS_ADIOS2 io::VTXWriter outfile(mesh->comm(), filename.replace_extension("bp"), {u_l}, "BP4"); outfile.write(0.0); outfile.close(); #endif } /// @brief This program shows how to interpolate functions into different types /// of finite element spaces and output the result to file for visualisation. int main(int argc, char* argv[]) { dolfinx::init_logging(argc, argv); MPI_Init(&argc, &argv); // The main body of the function is scoped to ensure that all objects // that depend on an MPI communicator are destroyed before MPI is // finalised at the end of this function. { // Create meshes. 
For what comes later in this demo we need to // ensure that a boundary between cells is located at x0=0.5 // Create mesh using float for geometry coordinates auto mesh0 = std::make_shared>(mesh::create_rectangle( MPI_COMM_WORLD, {{{0.0, 0.0}, {1.0, 1.0}}}, {32, 4}, mesh::CellType::triangle, mesh::create_cell_partitioner(mesh::GhostMode::none))); // Create mesh using same topology as mesh0, but with different // scalar type for geometry auto mesh1 = std::make_shared>(mesh::create_rectangle( MPI_COMM_WORLD, {{{0.0, 0.0}, {1.0, 1.0}}}, {32, 4}, mesh::CellType::triangle, mesh::create_cell_partitioner(mesh::GhostMode::none))); // Interpolate a function in a scalar Lagrange space and output the // result to file for visualisation using different types interpolate_scalar(mesh0, "u32"); interpolate_scalar(mesh1, "u64"); interpolate_scalar>(mesh0, "u_complex64"); interpolate_scalar>(mesh1, "u_complex128"); // Interpolate a function in a H(curl) finite element space, and // then interpolate the H(curl) function in a discontinuous Lagrange // space for visualisation using different types interpolate_nedelec(mesh0, "u_nedelec32"); interpolate_nedelec(mesh1, "u_nedelec64"); interpolate_nedelec>(mesh0, "u_nedelec_complex64"); interpolate_nedelec>(mesh1, "u_nedelec_complex128"); } MPI_Finalize(); return 0; } fenics-dolfinx-0.9.0/cpp/demo/interpolation_different_meshes/000077500000000000000000000000001470520666000244255ustar00rootroot00000000000000fenics-dolfinx-0.9.0/cpp/demo/interpolation_different_meshes/CMakeLists.txt000066400000000000000000000023701470520666000271670ustar00rootroot00000000000000# This file was generated by running # # python cmake/scripts/generate-cmakefiles.py from dolfinx/cpp # cmake_minimum_required(VERSION 3.19) set(PROJECT_NAME demo_interpolation_different_meshes) project(${PROJECT_NAME} LANGUAGES C CXX) # Set C++20 standard set(CMAKE_CXX_STANDARD 20) set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) if(NOT TARGET dolfinx) 
find_package(DOLFINX REQUIRED) endif() set(CMAKE_INCLUDE_CURRENT_DIR ON) add_executable(${PROJECT_NAME} main.cpp) target_link_libraries(${PROJECT_NAME} dolfinx) # Do not throw error for 'multi-line comments' (these are typical in rst which # includes LaTeX) include(CheckCXXCompilerFlag) check_cxx_compiler_flag("-Wno-comment" HAVE_NO_MULTLINE) set_source_files_properties( main.cpp PROPERTIES COMPILE_FLAGS "$<$:-Wno-comment -Wall -Wextra -pedantic -Werror>" ) # Test targets (used by DOLFINx testing system) set(TEST_PARAMETERS2 -np 2 ${MPIEXEC_PARAMS} "./${PROJECT_NAME}") set(TEST_PARAMETERS3 -np 3 ${MPIEXEC_PARAMS} "./${PROJECT_NAME}") add_test(NAME ${PROJECT_NAME}_mpi_2 COMMAND "mpirun" ${TEST_PARAMETERS2}) add_test(NAME ${PROJECT_NAME}_mpi_3 COMMAND "mpirun" ${TEST_PARAMETERS3}) add_test(NAME ${PROJECT_NAME}_serial COMMAND ${PROJECT_NAME}) fenics-dolfinx-0.9.0/cpp/demo/interpolation_different_meshes/main.cpp000066400000000000000000000070251470520666000260610ustar00rootroot00000000000000// ```text // Copyright (C) 2022 Igor A. 
Baratta and Massimiliano Leoni // This file is part of DOLFINx (https://www.fenicsproject.org) // SPDX-License-Identifier: LGPL-3.0-or-later // ``` // # Interpolation different meshes #include #include #include #include #include using namespace dolfinx; using T = double; int main(int argc, char* argv[]) { init_logging(argc, argv); MPI_Init(&argc, &argv); { MPI_Comm comm = MPI_COMM_WORLD; // Create a tetrahedral mesh auto mesh_tet = std::make_shared>( mesh::create_box(comm, {{{0, 0, 0}, {1, 1, 1}}}, {20, 20, 20}, mesh::CellType::tetrahedron)); // Create a hexahedral mesh auto mesh_hex = std::make_shared>( mesh::create_box(comm, {{{0, 0, 0}, {1, 1, 1}}}, {15, 15, 15}, mesh::CellType::hexahedron)); basix::FiniteElement element_tet = basix::element::create_lagrange( mesh::cell_type_to_basix_type(mesh_tet->topology()->cell_type()), 1, basix::element::lagrange_variant::equispaced, false); auto V_tet = std::make_shared>( fem::create_functionspace(mesh_tet, element_tet, std::vector{3})); basix::FiniteElement element_hex = basix::element::create_lagrange( mesh::cell_type_to_basix_type(mesh_hex->topology()->cell_type()), 2, basix::element::lagrange_variant::equispaced, false); auto V_hex = std::make_shared>( fem::create_functionspace(mesh_hex, element_hex, std::vector{3})); auto u_tet = std::make_shared>(V_tet); auto u_hex = std::make_shared>(V_hex); auto fun = [](auto x) -> std::pair, std::vector> { std::vector fdata(3 * x.extent(1), 0.0); using dextent = MDSPAN_IMPL_STANDARD_NAMESPACE::dextents; MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan f(fdata.data(), 3, x.extent(1)); for (std::size_t i = 0; i < x.extent(1); ++i) { f(0, i) = std::cos(10 * x(0, i)) * std::sin(10 * x(2, i)); f(1, i) = std::sin(10 * x(0, i)) * std::sin(10 * x(2, i)); f(2, i) = std::cos(10 * x(0, i)) * std::cos(10 * x(2, i)); } return {std::move(fdata), {3, x.extent(1)}}; }; // Interpolate an expression into u_tet u_tet->interpolate(fun); // Interpolate from u_tet to u_hex auto cell_map = 
mesh_hex->topology()->index_map(mesh_hex->topology()->dim()); assert(cell_map); std::vector cells( cell_map->size_local() + cell_map->num_ghosts(), 0); std::iota(cells.begin(), cells.end(), 0); geometry::PointOwnershipData interpolation_data = fem::create_interpolation_data( u_hex->function_space()->mesh()->geometry(), *u_hex->function_space()->element(), *u_tet->function_space()->mesh(), std::span(cells), 1e-8); u_hex->interpolate(*u_tet, cells, interpolation_data); #ifdef HAS_ADIOS2 io::VTXWriter write_tet(mesh_tet->comm(), "u_tet.bp", {u_tet}); write_tet.write(0.0); io::VTXWriter write_hex(mesh_hex->comm(), "u_hex.bp", {u_hex}); write_hex.write(0.0); #endif } MPI_Finalize(); return 0; } fenics-dolfinx-0.9.0/cpp/demo/poisson/000077500000000000000000000000001470520666000176365ustar00rootroot00000000000000fenics-dolfinx-0.9.0/cpp/demo/poisson/CMakeLists.txt000066400000000000000000000040261470520666000224000ustar00rootroot00000000000000# This file was generated by running # # python cmake/scripts/generate-cmakefiles from dolfinx/cpp # cmake_minimum_required(VERSION 3.19) set(PROJECT_NAME demo_poisson) project(${PROJECT_NAME} LANGUAGES C CXX) # Set C++20 standard set(CMAKE_CXX_STANDARD 20) set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) if(NOT TARGET dolfinx) find_package(DOLFINX REQUIRED) endif() include(CheckSymbolExists) set(CMAKE_REQUIRED_INCLUDES ${PETSC_INCLUDE_DIRS}) check_symbol_exists(PETSC_USE_COMPLEX petscsystypes.h PETSC_SCALAR_COMPLEX) check_symbol_exists(PETSC_USE_REAL_DOUBLE petscsystypes.h PETSC_REAL_DOUBLE) # Add target to compile UFL files if(PETSC_SCALAR_COMPLEX EQUAL 1) if(PETSC_REAL_DOUBLE EQUAL 1) set(SCALAR_TYPE "--scalar_type=complex128") else() set(SCALAR_TYPE "--scalar_type=complex64") endif() else() if(PETSC_REAL_DOUBLE EQUAL 1) set(SCALAR_TYPE "--scalar_type=float64") else() set(SCALAR_TYPE "--scalar_type=float32") endif() endif() add_custom_command( OUTPUT poisson.c COMMAND ffcx ${CMAKE_CURRENT_SOURCE_DIR}/poisson.py 
${SCALAR_TYPE} VERBATIM DEPENDS poisson.py COMMENT "Compile poisson.py using FFCx" ) set(CMAKE_INCLUDE_CURRENT_DIR ON) add_executable(${PROJECT_NAME} main.cpp ${CMAKE_CURRENT_BINARY_DIR}/poisson.c) target_link_libraries(${PROJECT_NAME} dolfinx) # Do not throw error for 'multi-line comments' (these are typical in rst which # includes LaTeX) include(CheckCXXCompilerFlag) check_cxx_compiler_flag("-Wno-comment" HAVE_NO_MULTLINE) set_source_files_properties( main.cpp PROPERTIES COMPILE_FLAGS "$<$:-Wno-comment -Wall -Wextra -pedantic -Werror>" ) # Test targets (used by DOLFINx testing system) set(TEST_PARAMETERS2 -np 2 ${MPIEXEC_PARAMS} "./${PROJECT_NAME}") set(TEST_PARAMETERS3 -np 3 ${MPIEXEC_PARAMS} "./${PROJECT_NAME}") add_test(NAME ${PROJECT_NAME}_mpi_2 COMMAND "mpirun" ${TEST_PARAMETERS2}) add_test(NAME ${PROJECT_NAME}_mpi_3 COMMAND "mpirun" ${TEST_PARAMETERS3}) add_test(NAME ${PROJECT_NAME}_serial COMMAND ${PROJECT_NAME}) fenics-dolfinx-0.9.0/cpp/demo/poisson/main.cpp000066400000000000000000000230211470520666000212640ustar00rootroot00000000000000// # Poisson equation // // This demo illustrates how to: // // * Solve a linear partial differential equation // * Create and apply Dirichlet boundary conditions // * Define Expressions // * Define a FunctionSpace // // ## Equation and problem definition // // The Poisson equation is the canonical elliptic partial differential // equation. For a domain $\Omega \subset \mathbb{R}^n$ with boundary // $\partial \Omega = \Gamma_{D} \cup \Gamma_{N}$, the Poisson equation // with particular boundary conditions reads: // // \begin{align*} // - \nabla^{2} u &= f \quad {\rm in} \ \Omega, \\ // u &= 0 \quad {\rm on} \ \Gamma_{D}, \\ // \nabla u \cdot n &= g \quad {\rm on} \ \Gamma_{N}. \\ // \end{align*} // // Here, $f$ and $g$ are input data and $n$ denotes the outward directed // boundary normal. 
The most standard variational form of Poisson // equation reads: find $u \in V$ such that // // $$ // a(u, v) = L(v) \quad \forall \ v \in V, // $$ // where $V$ is a suitable function space and // // \begin{align*} // a(u, v) &= \int_{\Omega} \nabla u \cdot \nabla v \, {\rm d} x, \\ // L(v) &= \int_{\Omega} f v \, {\rm d} x // + \int_{\Gamma_{N}} g v \, {\rm d} s. // \end{align*} // // The expression $a(u, v)$ is the bilinear form and $L(v)$ is the // linear form. It is assumed that all functions in $V$ satisfy the // Dirichlet boundary conditions ($u = 0 \ {\rm on} \ \Gamma_{D}$). // // In this demo, we shall consider the following definitions of the // input functions, the domain, and the boundaries: // // * $\Omega = [0,1] \times [0,1]$ (a unit square) // * $\Gamma_{D} = \{(0, y) \cup (1, y) \subset \partial \Omega\}$ // (Dirichlet boundary) // * $\Gamma_{N} = \{(x, 0) \cup (x, 1) \subset \partial \Omega\}$ // (Neumann boundary) // * $g = \sin(5x)$ (normal derivative) // * $f = 10\exp(-((x - 0.5)^2 + (y - 0.5)^2) / 0.02)$ (source term) // // // ## Implementation // // The implementation is split in two files: a file containing the // definition of the variational forms expressed in UFL and a C++ file // containing the actual solver. // // Running this demo requires the files: {download}`demo_poisson/main.cpp`, // {download}`demo_poisson/poisson.py` and // {download}`demo_poisson/CMakeLists.txt`. // // ### UFL code // // The UFL code is implemented in {download}`demo_poisson/poisson.py`. // ````{admonition} UFL code implemented in Python // :class: dropdown // ![ufl-code] // ```` // // ### C++ program // // The main solver is implemented in the // {download}`demo_poisson/main.cpp` file. // // At the top we include the DOLFINx header file and the generated // header file "Poisson.h" containing the variational forms for the // Poisson equation. For convenience we also include the DOLFINx // namespace. 
#include "poisson.h" #include #include #include #include #include #include #include #include #include #include #include using namespace dolfinx; using T = PetscScalar; using U = typename dolfinx::scalar_value_type_t; // Then follows the definition of the coefficient functions (for $f$ and // $g$), which are derived from the {cpp:class}`Expression` class in // DOLFINx // Inside the `main` function, we begin by defining a mesh of the // domain. As the unit square is a very standard domain, we can use a // built-in mesh provided by the {cpp:class}`UnitSquareMesh` factory. In // order to create a mesh consisting of 32 x 32 squares with each square // divided into two triangles, and the finite element space (specified // in the form file) defined relative to this mesh, we do as follows: int main(int argc, char* argv[]) { dolfinx::init_logging(argc, argv); PetscInitialize(&argc, &argv, nullptr, nullptr); { // Create mesh and function space auto part = mesh::create_cell_partitioner(mesh::GhostMode::shared_facet); auto mesh = std::make_shared>( mesh::create_rectangle(MPI_COMM_WORLD, {{{0.0, 0.0}, {2.0, 1.0}}}, {32, 16}, mesh::CellType::triangle, part)); auto element = basix::create_element( basix::element::family::P, basix::cell::type::triangle, 1, basix::element::lagrange_variant::unset, basix::element::dpc_variant::unset, false); auto V = std::make_shared>( fem::create_functionspace(mesh, element, {})); // Next, we define the variational formulation by initializing the // bilinear and linear forms ($a$, $L$) using the previously // defined {cpp:class}`FunctionSpace` `V`. Then we can create the // source and boundary flux term ($f$, $g$) and attach these to the // linear form. 
// Prepare and set Constants for the bilinear form auto kappa = std::make_shared>(2.0); auto f = std::make_shared>(V); auto g = std::make_shared>(V); // Define variational forms auto a = std::make_shared>(fem::create_form( *form_poisson_a, {V, V}, {}, {{"kappa", kappa}}, {}, {})); auto L = std::make_shared>(fem::create_form( *form_poisson_L, {V}, {{"f", f}, {"g", g}}, {}, {}, {})); // Now, the Dirichlet boundary condition ($u = 0$) can be created // using the class {cpp:class}`DirichletBC`. A // {cpp:class}`DirichletBC` takes two arguments: the value of the // boundary condition, and the part of the boundary on which the // condition applies. In our example, the value of the boundary // condition (0.0) can represented using a {cpp:class}`Function`, // and the Dirichlet boundary is defined by the indices of degrees // of freedom to which the boundary condition applies. The // definition of the Dirichlet boundary condition then looks as // follows: // Define boundary condition auto facets = mesh::locate_entities_boundary( *mesh, 1, [](auto x) { using U = typename decltype(x)::value_type; constexpr U eps = 1.0e-8; std::vector marker(x.extent(1), false); for (std::size_t p = 0; p < x.extent(1); ++p) { auto x0 = x(0, p); if (std::abs(x0) < eps or std::abs(x0 - 2) < eps) marker[p] = true; } return marker; }); const auto bdofs = fem::locate_dofs_topological( *V->mesh()->topology_mutable(), *V->dofmap(), 1, facets); auto bc = std::make_shared>(0.0, bdofs, V); f->interpolate( [](auto x) -> std::pair, std::vector> { std::vector f; for (std::size_t p = 0; p < x.extent(1); ++p) { auto dx = (x(0, p) - 0.5) * (x(0, p) - 0.5); auto dy = (x(1, p) - 0.5) * (x(1, p) - 0.5); f.push_back(10 * std::exp(-(dx + dy) / 0.02)); } return {f, {f.size()}}; }); g->interpolate( [](auto x) -> std::pair, std::vector> { std::vector f; for (std::size_t p = 0; p < x.extent(1); ++p) f.push_back(std::sin(5 * x(0, p))); return {f, {f.size()}}; }); // Now, we have specified the variational forms and can 
consider // the solution of the variational problem. First, we need to // define a {cpp:class}`Function` `u` to store the solution. (Upon // initialization, it is simply set to the zero function.) Next, we // can call the `solve` function with the arguments `a == L`, `u` // and `bc` as follows: auto u = std::make_shared>(V); auto A = la::petsc::Matrix(fem::petsc::create_matrix(*a), false); la::Vector b(L->function_spaces()[0]->dofmap()->index_map, L->function_spaces()[0]->dofmap()->index_map_bs()); MatZeroEntries(A.mat()); fem::assemble_matrix(la::petsc::Matrix::set_block_fn(A.mat(), ADD_VALUES), *a, {bc}); MatAssemblyBegin(A.mat(), MAT_FLUSH_ASSEMBLY); MatAssemblyEnd(A.mat(), MAT_FLUSH_ASSEMBLY); fem::set_diagonal(la::petsc::Matrix::set_fn(A.mat(), INSERT_VALUES), *V, {bc}); MatAssemblyBegin(A.mat(), MAT_FINAL_ASSEMBLY); MatAssemblyEnd(A.mat(), MAT_FINAL_ASSEMBLY); b.set(0.0); fem::assemble_vector(b.mutable_array(), *L); fem::apply_lifting(b.mutable_array(), {a}, {{bc}}, {}, T(1)); b.scatter_rev(std::plus()); bc->set(b.mutable_array(), std::nullopt); la::petsc::KrylovSolver lu(MPI_COMM_WORLD); la::petsc::options::set("ksp_type", "preonly"); la::petsc::options::set("pc_type", "lu"); lu.set_from_options(); lu.set_operator(A.mat()); la::petsc::Vector _u(la::petsc::create_vector_wrap(*u->x()), false); la::petsc::Vector _b(la::petsc::create_vector_wrap(b), false); lu.solve(_u.vec(), _b.vec()); // Update ghost values before output u->x()->scatter_fwd(); // The function `u` will be modified during the call to solve. A // {cpp:class}`Function` can be saved to a file. Here, we output // the solution to a `VTK` file (specified using the suffix `.pvd`) // for visualisation in an external program such as Paraview. 
// Save solution in VTK format io::VTKFile file(MPI_COMM_WORLD, "u.pvd", "w"); file.write({*u}, 0.0); #ifdef HAS_ADIOS2 // Save solution in VTX format io::VTXWriter vtx(MPI_COMM_WORLD, "u.bp", {u}, "bp4"); vtx.write(0); #endif } PetscFinalize(); return 0; } fenics-dolfinx-0.9.0/cpp/demo/poisson/poisson.py000066400000000000000000000025411470520666000217040ustar00rootroot00000000000000# The first step is to define the variational problem at hand. We define # the variational problem in UFL terms in a separate form file # {download}`demo_poisson/poisson.py`. We begin by defining the finite # element: from basix.ufl import element from ufl import ( Coefficient, Constant, FunctionSpace, Mesh, TestFunction, TrialFunction, ds, dx, grad, inner, ) e = element("Lagrange", "triangle", 1) # The first argument to :py:class:`FiniteElement` is the finite element # family, the second argument specifies the domain, while the third # argument specifies the polynomial degree. Thus, in this case, our # element `element` consists of first-order, continuous Lagrange basis # functions on triangles (or in order words, continuous piecewise linear # polynomials on triangles). 
# # Next, we use this element to initialize the trial and test functions # ($u$ and $v$) and the coefficient functions ($f$ and $g$): coord_element = element("Lagrange", "triangle", 1, shape=(2,)) mesh = Mesh(coord_element) V = FunctionSpace(mesh, e) u = TrialFunction(V) v = TestFunction(V) f = Coefficient(V) g = Coefficient(V) kappa = Constant(mesh) # Finally, we define the bilinear and linear forms according to the # variational formulation of the equations: a = kappa * inner(grad(u), grad(v)) * dx L = inner(f, v) * dx + inner(g, v) * ds fenics-dolfinx-0.9.0/cpp/demo/poisson_matrix_free/000077500000000000000000000000001470520666000222235ustar00rootroot00000000000000fenics-dolfinx-0.9.0/cpp/demo/poisson_matrix_free/CMakeLists.txt000066400000000000000000000040421470520666000247630ustar00rootroot00000000000000# This file was generated by running # # python cmake/scripts/generate-cmakefiles from dolfinx/cpp # cmake_minimum_required(VERSION 3.19) set(PROJECT_NAME demo_poisson_matrix_free) project(${PROJECT_NAME} LANGUAGES C CXX) # Set C++20 standard set(CMAKE_CXX_STANDARD 20) set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) if(NOT TARGET dolfinx) find_package(DOLFINX REQUIRED) endif() include(CheckSymbolExists) set(CMAKE_REQUIRED_INCLUDES ${PETSC_INCLUDE_DIRS}) check_symbol_exists(PETSC_USE_COMPLEX petscsystypes.h PETSC_SCALAR_COMPLEX) check_symbol_exists(PETSC_USE_REAL_DOUBLE petscsystypes.h PETSC_REAL_DOUBLE) # Add target to compile UFL files if(PETSC_SCALAR_COMPLEX EQUAL 1) if(PETSC_REAL_DOUBLE EQUAL 1) set(SCALAR_TYPE "--scalar_type=complex128") else() set(SCALAR_TYPE "--scalar_type=complex64") endif() else() if(PETSC_REAL_DOUBLE EQUAL 1) set(SCALAR_TYPE "--scalar_type=float64") else() set(SCALAR_TYPE "--scalar_type=float32") endif() endif() add_custom_command( OUTPUT poisson.c COMMAND ffcx ${CMAKE_CURRENT_SOURCE_DIR}/poisson.py ${SCALAR_TYPE} VERBATIM DEPENDS poisson.py COMMENT "Compile poisson.py using FFCx" ) set(CMAKE_INCLUDE_CURRENT_DIR ON) 
add_executable(${PROJECT_NAME} main.cpp ${CMAKE_CURRENT_BINARY_DIR}/poisson.c) target_link_libraries(${PROJECT_NAME} dolfinx) # Do not throw error for 'multi-line comments' (these are typical in rst which # includes LaTeX) include(CheckCXXCompilerFlag) check_cxx_compiler_flag("-Wno-comment" HAVE_NO_MULTLINE) set_source_files_properties( main.cpp PROPERTIES COMPILE_FLAGS "$<$:-Wno-comment -Wall -Wextra -pedantic -Werror>" ) # Test targets (used by DOLFINx testing system) set(TEST_PARAMETERS2 -np 2 ${MPIEXEC_PARAMS} "./${PROJECT_NAME}") set(TEST_PARAMETERS3 -np 3 ${MPIEXEC_PARAMS} "./${PROJECT_NAME}") add_test(NAME ${PROJECT_NAME}_mpi_2 COMMAND "mpirun" ${TEST_PARAMETERS2}) add_test(NAME ${PROJECT_NAME}_mpi_3 COMMAND "mpirun" ${TEST_PARAMETERS3}) add_test(NAME ${PROJECT_NAME}_serial COMMAND ${PROJECT_NAME}) fenics-dolfinx-0.9.0/cpp/demo/poisson_matrix_free/main.cpp000066400000000000000000000165301470520666000236600ustar00rootroot00000000000000// ```text // Copyright (C) 2022 Igor A. Baratta // This file is part of DOLFINx (https://www.fenicsproject.org) // SPDX-License-Identifier: LGPL-3.0-or-later // ``` // # Matrix-free conjugate gradient (CG) solver // // This demo illustrates how to: // * Solve a linear partial differential equation using a matrix-free CG // solver // * Create and apply Dirichlet boundary conditions // * Compute errors // // \begin{align*} // - \nabla^{2} u &= f \quad {\rm in} \ \Omega, \\ // u &= u_D \quad {\rm on} \ \Gamma_{D} // \end{align*} // // where // \begin{align*} // u_D &= 1 + x^2 + 2y^2, \\ // f = -6 // \end{align*} // // ```{note} // This demo illustrates the use of a matrix-free Conjugate // Gradient solver. Many practical problems will also require // a preconditioner to create an efficient solver. // ``` // // ## UFL form file // // The UFL file is implemented in // {download}`demo_poisson_matrix_free/poisson.py`. 
// ````{admonition} UFL form implemented in python // :class: dropdown // ![ufl-code] // ```` // // ## C++ program #include "poisson.h" #include #include #include #include #include #include #include #include #include #include using namespace dolfinx; namespace linalg { /// @brief Compute vector r = alpha * x + y. /// @param[out] r /// @param[in] alpha /// @param[in] x /// @param[in] y void axpy(auto&& r, auto alpha, auto&& x, auto&& y) { std::ranges::transform(x.array(), y.array(), r.mutable_array().begin(), [alpha](auto x, auto y) { return alpha * x + y; }); } /// @brief Solve problem A.x = b using the conjugate gradient (CG) /// method. /// /// @param[in, out] x Solution vector, may be set to an initial guess /// hence no zeroed. /// @param[in] b Right-hand side vector. /// @param[in] action Function that computes the action of the linear /// operator on a vector. /// @param[in] kmax Maximum number of iterations /// @param[in] rtol Relative tolerances for convergence /// @return Number of CG iterations. /// @pre The ghost values of `x` and `b` must be updated before this /// function is called. 
int cg(auto& x, auto& b, auto action, int kmax = 50, double rtol = 1e-8) { using T = typename std::decay_t::value_type; // Create working vectors la::Vector r(b), y(b); // Compute initial residual r0 = b - Ax0 action(x, y); axpy(r, T(-1), y, b); // Create p work vector la::Vector p(r); // Iterations of CG auto rnorm0 = la::squared_norm(r); auto rtol2 = rtol * rtol; auto rnorm = rnorm0; int k = 0; while (k < kmax) { ++k; // Compute y = A p action(p, y); // Compute alpha = r.r/p.y T alpha = rnorm / la::inner_product(p, y); // Update x (x <- x + alpha*p) axpy(x, alpha, p, x); // Update r (r <- r - alpha*y) axpy(r, -alpha, y, r); // Update residual norm auto rnorm_new = la::squared_norm(r); T beta = rnorm_new / rnorm; rnorm = rnorm_new; if (rnorm / rnorm0 < rtol2) break; // Update p (p <- beta * p + r) axpy(p, beta, p, r); } return k; } } // namespace linalg template void solver(MPI_Comm comm) { // Create mesh and function space auto mesh = std::make_shared>(mesh::create_rectangle( comm, {{{0.0, 0.0}, {1.0, 1.0}}}, {10, 10}, mesh::CellType::triangle, mesh::create_cell_partitioner(mesh::GhostMode::none))); auto element = basix::create_element( basix::element::family::P, basix::cell::type::triangle, 2, basix::element::lagrange_variant::unset, basix::element::dpc_variant::unset, false); auto V = std::make_shared>( fem::create_functionspace(mesh, element, {})); // Prepare and set Constants for the bilinear form auto f = std::make_shared>(-6.0); // Define variational forms auto L = std::make_shared>( fem::create_form(*form_poisson_L, {V}, {}, {{"f", f}}, {}, {})); // Action of the bilinear form "a" on a function ui auto ui = std::make_shared>(V); auto M = std::make_shared>( fem::create_form(*form_poisson_M, {V}, {{"ui", ui}}, {{}}, {}, {})); // Define boundary condition auto u_D = std::make_shared>(V); u_D->interpolate( [](auto x) -> std::pair, std::vector> { std::vector f; for (std::size_t p = 0; p < x.extent(1); ++p) f.push_back(1 + x(0, p) * x(0, p) + 2 * x(1, p) * x(1, 
p)); return {f, {f.size()}}; }); mesh->topology_mutable()->create_connectivity(1, 2); const std::vector facets = mesh::exterior_facet_indices(*mesh->topology()); std::vector bdofs = fem::locate_dofs_topological( *V->mesh()->topology_mutable(), *V->dofmap(), 1, facets); auto bc = std::make_shared>(u_D, bdofs); // Assemble RHS vector la::Vector b(V->dofmap()->index_map, V->dofmap()->index_map_bs()); fem::assemble_vector(b.mutable_array(), *L); // Apply lifting to account for Dirichlet boundary condition // b <- b - A * x_bc bc->set(ui->x()->mutable_array(), std::nullopt, T(-1)); fem::assemble_vector(b.mutable_array(), *M); // Communicate ghost values b.scatter_rev(std::plus()); // Set BC dofs to zero (effectively zeroes columns of A) bc->set(b.mutable_array(), std::nullopt, T(0)); b.scatter_fwd(); // Pack coefficients and constants auto coeff = fem::allocate_coefficient_storage(*M); std::vector constants = fem::pack_constants(*M); // Create function for computing the action of A on x (y = Ax) auto action = [&M, &ui, &bc, &coeff, &constants](auto& x, auto& y) { // Zero y y.set(0.0); // Update coefficient ui (just copy data from x to ui) std::ranges::copy(x.array(), ui->x()->mutable_array().begin()); // Compute action of A on x fem::pack_coefficients(*M, coeff); fem::assemble_vector(y.mutable_array(), *M, std::span(constants), fem::make_coefficients_span(coeff)); // Set BC dofs to zero (effectively zeroes rows of A) bc->set(y.mutable_array(), std::nullopt, T(0)); // Accumulate ghost values y.scatter_rev(std::plus()); // Update ghost values y.scatter_fwd(); }; // Compute solution using the CG method auto u = std::make_shared>(V); int num_it = linalg::cg(*u->x(), b, action, 200, 1e-6); // Set BC values in the solution vectors bc->set(u->x()->mutable_array(), std::nullopt, T(1)); // Compute L2 error (squared) of the solution vector e = (u - u_d, u // - u_d)*dx auto E = std::make_shared>(fem::create_form( *form_poisson_E, {}, {{"uexact", u_D}, {"usol", u}}, {}, {}, {}, 
mesh)); T error = fem::assemble_scalar(*E); if (dolfinx::MPI::rank(comm) == 0) { std::cout << "Number of CG iterations " << num_it << std::endl; std::cout << "Finite element error (L2 norm (squared)) " << std::abs(error) << std::endl; } } /// Main program int main(int argc, char* argv[]) { using T = PetscScalar; using U = typename dolfinx::scalar_value_type_t; init_logging(argc, argv); MPI_Init(&argc, &argv); solver(MPI_COMM_WORLD); MPI_Finalize(); return 0; } fenics-dolfinx-0.9.0/cpp/demo/poisson_matrix_free/poisson.py000066400000000000000000000016421470520666000242720ustar00rootroot00000000000000# UFL input for the Matrix-free Poisson Demo from basix.ufl import element from ufl import ( Coefficient, Constant, FunctionSpace, Mesh, TestFunction, TrialFunction, action, dx, grad, inner, ) coord_element = element("Lagrange", "triangle", 1, shape=(2,)) mesh = Mesh(coord_element) # Function Space e = element("Lagrange", "triangle", 2) V = FunctionSpace(mesh, e) # Trial and test functions u = TrialFunction(V) v = TestFunction(V) # Constant RHS f = Constant(V) # Bilinear and linear forms according to the variational # formulation of the equations: a = inner(grad(u), grad(v)) * dx L = inner(f, v) * dx # Linear form representing the action of the form `a`` on the # coefficient `ui`:` ui = Coefficient(V) M = action(a, ui) # Form to compute the L2 norm of the error usol = Coefficient(V) uexact = Coefficient(V) E = inner(usol - uexact, usol - uexact) * dx forms = [M, L, E] fenics-dolfinx-0.9.0/cpp/doc/000077500000000000000000000000001470520666000157655ustar00rootroot00000000000000fenics-dolfinx-0.9.0/cpp/doc/Doxyfile000066400000000000000000003544771470520666000175170ustar00rootroot00000000000000# Doxyfile 1.9.5 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. # # All text after a double hash (##) is considered a comment and is placed in # front of the TAG it is preceding. 
# # All text after a single hash (#) is considered a comment and will be ignored. # The format is: # TAG = value [value, ...] # For lists, items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (\" \"). # # Note: # # Use doxygen to compare the used configuration file with the template # configuration file: # doxygen -x [configFile] # Use doxygen to compare the used configuration file with the template # configuration file without replacing the environment variables or CMake type # replacement variables: # doxygen -x_noenv [configFile] #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the configuration # file that follow. The default is UTF-8 which is also the encoding used for all # text before the first occurrence of this tag. Doxygen uses libiconv (or the # iconv built into libc) for the transcoding. See # https://www.gnu.org/software/libiconv/ for the list of possible encodings. # The default value is: UTF-8. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or a sequence of words surrounded by # double-quotes, unless you are using Doxywizard) that should identify the # project for which the documentation is generated. This name is used in the # title of most generated pages and in a few other places. # The default value is: My Project. PROJECT_NAME = DOLFINx # The PROJECT_NUMBER tag can be used to enter a project or revision number. This # could be handy for archiving the generated documentation or if some version # control system is used. PROJECT_NUMBER = $(DOLFINX_VERSION) # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a # quick idea about the purpose of the project. 
Keep the description short. PROJECT_BRIEF = "DOLFINx C++ interface" # With the PROJECT_LOGO tag one can specify a logo or an icon that is included # in the documentation. The maximum height of the logo should not exceed 55 # pixels and the maximum width should not exceed 200 pixels. Doxygen will copy # the logo to the output directory. PROJECT_LOGO = # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path # into which the generated documentation will be written. If a relative path is # entered, it will be relative to the location where doxygen was started. If # left blank the current directory will be used. OUTPUT_DIRECTORY = # If the CREATE_SUBDIRS tag is set to YES then doxygen will create up to 4096 # sub-directories (in 2 levels) under the output directory of each output format # and will distribute the generated files over these directories. Enabling this # option can be useful when feeding doxygen a huge amount of source files, where # putting all generated files in the same directory would otherwise causes # performance problems for the file system. Adapt CREATE_SUBDIRS_LEVEL to # control the number of sub-directories. # The default value is: NO. CREATE_SUBDIRS = YES # Controls the number of sub-directories that will be created when # CREATE_SUBDIRS tag is set to YES. Level 0 represents 16 directories, and every # level increment doubles the number of directories, resulting in 4096 # directories at level 8 which is the default and also the maximum value. The # sub-directories are organized in 2 levels, the first level always has a fixed # numer of 16 directories. # Minimum value: 0, maximum value: 8, default value: 8. # This tag requires that the tag CREATE_SUBDIRS is set to YES. CREATE_SUBDIRS_LEVEL = 8 # If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII # characters to appear in the names of generated files. 
If set to NO, non-ASCII # characters will be escaped, for example _xE3_x81_x84 will be used for Unicode # U+3044. # The default value is: NO. ALLOW_UNICODE_NAMES = YES # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. # Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Bulgarian, # Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, Dutch, English # (United States), Esperanto, Farsi (Persian), Finnish, French, German, Greek, # Hindi, Hungarian, Indonesian, Italian, Japanese, Japanese-en (Japanese with # English messages), Korean, Korean-en (Korean with English messages), Latvian, # Lithuanian, Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, # Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, # Swedish, Turkish, Ukrainian and Vietnamese. # The default value is: English. OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member # descriptions after the members that are listed in the file and class # documentation (similar to Javadoc). Set to NO to disable this. # The default value is: YES. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief # description of a member or function before the detailed description # # Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. # The default value is: YES. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator that is # used to form the text in various listings. Each string in this list, if found # as the leading text of the brief description, will be stripped from the text # and the result, after processing the whole list, is used as the annotated # text. Otherwise, the brief description is used as-is. 
If left blank, the # following values are used ($name is automatically replaced with the name of # the entity):The $name class, The $name widget, The $name file, is, provides, # specifies, contains, represents, a, an and the. ABBREVIATE_BRIEF = "The $name class" \ "The $name widget" \ "The $name file" \ is \ provides \ specifies \ contains \ represents \ a \ an \ the # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # doxygen will generate a detailed section even if there is only a brief # description. # The default value is: NO. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. # The default value is: NO. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path # before files name in the file list and in the header files. If set to NO the # shortest path that makes the file name unique will be used # The default value is: YES. FULL_PATH_NAMES = YES # The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. # Stripping is only done if one of the specified strings matches the left-hand # part of the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the path to # strip. # # Note that you can specify absolute paths here, but also relative paths, which # will be relative from the directory where doxygen is started. # This tag requires that the tag FULL_PATH_NAMES is set to YES. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the # path mentioned in the documentation of a class, which tells the reader which # header file to include in order to use a class. 
If left blank only the name of # the header file containing the class definition is used. Otherwise one should # specify the list of include paths that are normally passed to the compiler # using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but # less readable) file names. This can be useful is your file systems doesn't # support long names like on DOS, Mac, or CD-ROM. # The default value is: NO. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the # first line (until the first dot) of a Javadoc-style comment as the brief # description. If set to NO, the Javadoc-style will behave just like regular Qt- # style comments (thus requiring an explicit @brief command for a brief # description.) # The default value is: NO. JAVADOC_AUTOBRIEF = NO # If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line # such as # /*************** # as being the beginning of a Javadoc-style comment "banner". If set to NO, the # Javadoc-style will behave just like regular comments and it will not be # interpreted by doxygen. # The default value is: NO. JAVADOC_BANNER = NO # If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first # line (until the first dot) of a Qt-style comment as the brief description. If # set to NO, the Qt-style will behave just like regular Qt-style comments (thus # requiring an explicit \brief command for a brief description.) # The default value is: NO. QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a # multi-line C++ special comment block (i.e. a block of //! or /// comments) as # a brief description. This used to be the default behavior. The new default is # to treat a multi-line C++ comment block as a detailed description. Set this # tag to YES if you prefer the old behavior instead. 
# # Note that setting this tag to YES also means that rational rose comments are # not recognized any more. # The default value is: NO. MULTILINE_CPP_IS_BRIEF = NO # By default Python docstrings are displayed as preformatted text and doxygen's # special commands cannot be used. By setting PYTHON_DOCSTRING to NO the # doxygen's special commands can be used and the contents of the docstring # documentation blocks is shown as doxygen documentation. # The default value is: YES. PYTHON_DOCSTRING = YES # If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the # documentation from any documented member that it re-implements. # The default value is: YES. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new # page for each member. If set to NO, the documentation of a member will be part # of the file/class/namespace that contains it. # The default value is: NO. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen # uses this value to replace tabs by spaces in code fragments. # Minimum value: 1, maximum value: 16, default value: 4. TAB_SIZE = 4 # This tag can be used to specify a number of aliases that act as commands in # the documentation. An alias has the form: # name=value # For example adding # "sideeffect=@par Side Effects:^^" # will allow you to put the command \sideeffect (or @sideeffect) in the # documentation, which will result in a user-defined paragraph with heading # "Side Effects:". Note that you cannot put \n's in the value part of an alias # to insert newlines (in the resulting output). You can put ^^ in the value part # of an alias to insert a newline as if a physical newline was in the original # file. 
When you need a literal { or } or , in the value part of an alias you # have to escape them by means of a backslash (\), this can lead to conflicts # with the commands \{ and \} for these it is advised to use the version @{ and # @} or use a double escape (\\{ and \\}) ALIASES = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources # only. Doxygen will then generate output that is more tailored for C. For # instance, some of the names that are used will be different. The list of all # members will be omitted, etc. # The default value is: NO. OPTIMIZE_OUTPUT_FOR_C = NO # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or # Python sources only. Doxygen will then generate output that is more tailored # for that language. For instance, namespaces will be presented as packages, # qualified scopes will look different, etc. # The default value is: NO. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources. Doxygen will then generate output that is tailored for Fortran. # The default value is: NO. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for VHDL. # The default value is: NO. OPTIMIZE_OUTPUT_VHDL = NO # Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice # sources only. Doxygen will then generate output that is more tailored for that # language. For instance, namespaces will be presented as modules, types will be # separated into more groups, etc. # The default value is: NO. OPTIMIZE_OUTPUT_SLICE = NO # Doxygen selects the parser to use depending on the extension of the files it # parses. With this tag you can assign which parser to use for a given # extension. Doxygen has a built-in mapping, but you can override or extend it # using this tag. 
The format is ext=language, where ext is a file extension, and # language is one of the parsers supported by doxygen: IDL, Java, JavaScript, # Csharp (C#), C, C++, Lex, D, PHP, md (Markdown), Objective-C, Python, Slice, # VHDL, Fortran (fixed format Fortran: FortranFixed, free formatted Fortran: # FortranFree, unknown formatted Fortran: Fortran. In the later case the parser # tries to guess whether the code is fixed or free formatted code, this is the # default for Fortran type files). For instance to make doxygen treat .inc files # as Fortran files (default is PHP), and .f files as C (default is Fortran), # use: inc=Fortran f=C. # # Note: For files without extension you can use no_extension as a placeholder. # # Note that for custom extensions you also need to set FILE_PATTERNS otherwise # the files are not read by doxygen. When specifying no_extension you should add # * to the FILE_PATTERNS. # # Note see also the list of default file extension mappings. EXTENSION_MAPPING = # If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments # according to the Markdown format, which allows for more readable # documentation. See https://daringfireball.net/projects/markdown/ for details. # The output of markdown processing is further processed by doxygen, so you can # mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in # case of backward compatibilities issues. # The default value is: YES. MARKDOWN_SUPPORT = YES # When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up # to that level are automatically included in the table of contents, even if # they do not have an id attribute. # Note: This feature currently applies only to Markdown headings. # Minimum value: 0, maximum value: 99, default value: 5. # This tag requires that the tag MARKDOWN_SUPPORT is set to YES. 
TOC_INCLUDE_HEADINGS = 0 # When enabled doxygen tries to link words that correspond to documented # classes, or namespaces to their corresponding documentation. Such a link can # be prevented in individual cases by putting a % sign in front of the word or # globally by setting AUTOLINK_SUPPORT to NO. # The default value is: YES. AUTOLINK_SUPPORT = YES # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want # to include (a tag file for) the STL sources as input, then you should set this # tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); # versus func(std::string) {}). This also make the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. # The default value is: NO. BUILTIN_STL_SUPPORT = NO # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. # The default value is: NO. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip (see: # https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen # will parse them like normal C++ but will assume all classes use public instead # of private inheritance when no explicit protection keyword is present. # The default value is: NO. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate # getter and setter methods for a property. Setting this option to YES will make # doxygen to replace the get and set methods by a property in the documentation. # This will only work if the methods are indeed getting or setting a simple # type. If this is not the case, or you want to show the methods anyway, you # should set this option to NO. # The default value is: YES. 
IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. # The default value is: NO. DISTRIBUTE_GROUP_DOC = NO # If one adds a struct or class to a group and this option is enabled, then also # any nested class or struct is added to the same group. By default this option # is disabled and one has to add nested compounds explicitly via \ingroup. # The default value is: NO. GROUP_NESTED_COMPOUNDS = NO # Set the SUBGROUPING tag to YES to allow class member groups of the same type # (for instance a group of public functions) to be put as a subgroup of that # type (e.g. under the Public Functions section). Set it to NO to prevent # subgrouping. Alternatively, this can be done per class using the # \nosubgrouping command. # The default value is: YES. SUBGROUPING = YES # When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions # are shown inside the group in which they are included (e.g. using \ingroup) # instead of on a separate page (for HTML and Man pages) or section (for LaTeX # and RTF). # # Note that this feature does not work in combination with # SEPARATE_MEMBER_PAGES. # The default value is: NO. INLINE_GROUPED_CLASSES = NO # When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions # with only public data fields or simple typedef fields will be shown inline in # the documentation of the scope in which they are defined (i.e. file, # namespace, or group documentation), provided this scope is documented. If set # to NO, structs, classes, and unions are shown on a separate page (for HTML and # Man pages) or section (for LaTeX and RTF). # The default value is: NO. 
INLINE_SIMPLE_STRUCTS = NO # When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or # enum is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically be # useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. # The default value is: NO. TYPEDEF_HIDES_STRUCT = NO # The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This # cache is used to resolve symbols given their name and scope. Since this can be # an expensive process and often the same symbol appears multiple times in the # code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small # doxygen will become slower. If the cache is too large, memory is wasted. The # cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range # is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 # symbols. At the end of a run doxygen will report the cache usage and suggest # the optimal cache size from a speed point of view. # Minimum value: 0, maximum value: 9, default value: 0. LOOKUP_CACHE_SIZE = 0 # The NUM_PROC_THREADS specifies the number of threads doxygen is allowed to use # during processing. When set to 0 doxygen will based this on the number of # cores available in the system. You can set it explicitly to a value larger # than 0 to get more control over the balance between CPU load and processing # speed. At this moment only the input processing can be done using multiple # threads. Since this is still an experimental feature the default is set to 1, # which effectively disables parallel processing. Please report any issues you # encounter. 
Generating dot graphs in parallel is controlled by the # DOT_NUM_THREADS setting. # Minimum value: 0, maximum value: 32, default value: 1. NUM_PROC_THREADS = 1 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in # documentation are documented, even if no documentation was available. Private # class members and static file members will be hidden unless the # EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. # Note: This will also disable the warnings about undocumented members that are # normally produced when WARNINGS is set to YES. # The default value is: NO. EXTRACT_ALL = NO # If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will # be included in the documentation. # The default value is: NO. EXTRACT_PRIVATE = NO # If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual # methods of a class will be included in the documentation. # The default value is: NO. EXTRACT_PRIV_VIRTUAL = NO # If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal # scope will be included in the documentation. # The default value is: NO. EXTRACT_PACKAGE = NO # If the EXTRACT_STATIC tag is set to YES, all static members of a file will be # included in the documentation. # The default value is: NO. EXTRACT_STATIC = NO # If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined # locally in source files will be included in the documentation. If set to NO, # only classes defined in header files are included. Does not have any effect # for Java sources. # The default value is: YES. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. 
If set to YES, local methods, # which are defined in the implementation section but not in the interface are # included in the documentation. If set to NO, only methods in the interface are # included. # The default value is: NO. EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base name of # the file that contains the anonymous namespace. By default anonymous namespace # are hidden. # The default value is: NO. EXTRACT_ANON_NSPACES = NO # If this flag is set to YES, the name of an unnamed parameter in a declaration # will be determined by the corresponding definition. By default unnamed # parameters remain unnamed in the output. # The default value is: YES. RESOLVE_UNNAMED_PARAMS = YES # If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all # undocumented members inside documented classes or files. If set to NO these # members will be included in the various overviews, but no documentation # section is generated. This option has no effect if EXTRACT_ALL is enabled. # The default value is: NO. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. If set # to NO, these classes will be included in the various overviews. This option # has no effect if EXTRACT_ALL is enabled. # The default value is: NO. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend # declarations. If set to NO, these declarations will be included in the # documentation. # The default value is: NO. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any # documentation blocks found inside the body of a function. If set to NO, these # blocks will be appended to the function's detailed documentation block. 
# The default value is: NO. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation that is typed after a # \internal command is included. If the tag is set to NO then the documentation # will be excluded. Set it to YES to include the internal documentation. # The default value is: NO. INTERNAL_DOCS = NO # With the correct setting of option CASE_SENSE_NAMES doxygen will better be # able to match the capabilities of the underlying filesystem. In case the # filesystem is case sensitive (i.e. it supports files in the same directory # whose names only differ in casing), the option must be set to YES to properly # deal with such files in case they appear in the input. For filesystems that # are not case sensitive the option should be set to NO to properly deal with # output files written for symbols that only differ in casing, such as for two # classes, one named CLASS and the other named Class, and to also support # references to files without having to specify the exact matching casing. On # Windows (including Cygwin) and MacOS, users should typically set this option # to NO, whereas on Linux or other Unix flavors it should typically be set to # YES. # Possible values are: SYSTEM, NO and YES. # The default value is: SYSTEM. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with # their full class and namespace scopes in the documentation. If set to YES, the # scope will be hidden. # The default value is: NO. HIDE_SCOPE_NAMES = YES # If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will # append additional text to a page's title, such as Class Reference. If set to # YES the compound reference will be hidden. # The default value is: NO. HIDE_COMPOUND_REFERENCE= NO # If the SHOW_HEADERFILE tag is set to YES then the documentation for a class # will show which file needs to be included to use the class. # The default value is: YES. 
SHOW_HEADERFILE = YES # If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of # the files that are included by a file in the documentation of that file. # The default value is: YES. SHOW_INCLUDE_FILES = YES # If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each # grouped member an include statement to the documentation, telling the reader # which file to include in order to use the member. # The default value is: NO. SHOW_GROUPED_MEMB_INC = NO # If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include # files with double quotes in the documentation rather than with sharp brackets. # The default value is: NO. FORCE_LOCAL_INCLUDES = NO # If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the # documentation for inline members. # The default value is: YES. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the # (detailed) documentation of file and class members alphabetically by member # name. If set to NO, the members will appear in declaration order. # The default value is: YES. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief # descriptions of file, namespace and class members alphabetically by member # name. If set to NO, the members will appear in declaration order. Note that # this will also influence the order of the classes in the class list. # The default value is: NO. SORT_BRIEF_DOCS = NO # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the # (brief and detailed) documentation of class members so that constructors and # destructors are listed first. If set to NO the constructors will appear in the # respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. # Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief # member documentation. 
# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting # detailed member documentation. # The default value is: NO. SORT_MEMBERS_CTORS_1ST = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy # of group names into alphabetical order. If set to NO the group names will # appear in their defined order. # The default value is: NO. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by # fully-qualified names, including namespaces. If set to NO, the class list will # be sorted only by class name, not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the alphabetical # list. # The default value is: NO. SORT_BY_SCOPE_NAME = NO # If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper # type resolution of all parameters of a function it will reject a match between # the prototype and the implementation of a member function even if there is # only one candidate or it is obvious which candidate to choose by doing a # simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still # accept a match between prototype and implementation in such cases. # The default value is: NO. STRICT_PROTO_MATCHING = YES # The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo # list. This list is created by putting \todo commands in the documentation. # The default value is: YES. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test # list. This list is created by putting \test commands in the documentation. # The default value is: YES. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug # list. This list is created by putting \bug commands in the documentation. # The default value is: YES. 
GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) # the deprecated list. This list is created by putting \deprecated commands in # the documentation. # The default value is: YES. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional documentation # sections, marked by \if ... \endif and \cond # ... \endcond blocks. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the # initial value of a variable or macro / define can have for it to appear in the # documentation. If the initializer consists of more lines than specified here # it will be hidden. Use a value of 0 to hide initializers completely. The # appearance of the value of individual variables and macros / defines can be # controlled using \showinitializer or \hideinitializer command in the # documentation regardless of this setting. # Minimum value: 0, maximum value: 10000, default value: 30. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated at # the bottom of the documentation of classes and structs. If set to YES, the # list will mention the files that were used to generate the documentation. # The default value is: YES. SHOW_USED_FILES = YES # Set the SHOW_FILES tag to NO to disable the generation of the Files page. This # will remove the Files entry from the Quick Index and from the Folder Tree View # (if specified). # The default value is: YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces # page. This will remove the Namespaces entry from the Quick Index and from the # Folder Tree View (if specified). # The default value is: YES. SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). 
Doxygen will invoke the program by executing (via # popen()) the command command input-file, where command is the value of the # FILE_VERSION_FILTER tag, and input-file is the name of an input file provided # by doxygen. Whatever the program writes to standard output is used as the file # version. For an example see the documentation. FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed # by doxygen. The layout file controls the global structure of the generated # output files in an output format independent way. To create the layout file # that represents doxygen's defaults, run doxygen with the -l option. You can # optionally specify a file name after the option, if omitted DoxygenLayout.xml # will be used as the name of the layout file. See also section "Changing the # layout of pages" for information. # # Note that if you run doxygen from a directory containing a file called # DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE # tag is left empty. LAYOUT_FILE = # The CITE_BIB_FILES tag can be used to specify one or more bib files containing # the reference definitions. This must be a list of .bib files. The .bib # extension is automatically appended if omitted. This requires the bibtex tool # to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info. # For LaTeX the style of the bibliography can be controlled using # LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the # search path. See also \cite for info how to create references. CITE_BIB_FILES = #--------------------------------------------------------------------------- # Configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated to # standard output by doxygen. If QUIET is set to YES this implies that the # messages are off. 
# The default value is: NO. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated to standard error (stderr) by doxygen. If WARNINGS is set to YES # this implies that the warnings are on. # # Tip: Turn warnings on while writing the documentation. # The default value is: YES. WARNINGS = YES # If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate # warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag # will automatically be disabled. # The default value is: YES. WARN_IF_UNDOCUMENTED = YES # If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as documenting some parameters in # a documented function twice, or documenting parameters that don't exist or # using markup commands wrongly. # The default value is: YES. WARN_IF_DOC_ERROR = YES # If WARN_IF_INCOMPLETE_DOC is set to YES, doxygen will warn about incomplete # function parameter documentation. If set to NO, doxygen will accept that some # parameters have no documentation without warning. # The default value is: YES. WARN_IF_INCOMPLETE_DOC = YES # This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that # are documented, but have no documentation for their parameters or return # value. If set to NO, doxygen will only warn about wrong parameter # documentation, but not about the absence of documentation. If EXTRACT_ALL is # set to YES then this flag will automatically be disabled. See also # WARN_IF_INCOMPLETE_DOC # The default value is: NO. WARN_NO_PARAMDOC = NO # If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when # a warning is encountered. If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS # then doxygen will continue running as if WARN_AS_ERROR tag is set to NO, but # at the end of the doxygen process doxygen will return with a non-zero status. # Possible values are: NO, YES and FAIL_ON_WARNINGS. 
# The default value is: NO. WARN_AS_ERROR = YES # The WARN_FORMAT tag determines the format of the warning messages that doxygen # can produce. The string should contain the $file, $line, and $text tags, which # will be replaced by the file and line number from which the warning originated # and the warning text. Optionally the format may contain $version, which will # be replaced by the version of the file (if it could be obtained via # FILE_VERSION_FILTER) # See also: WARN_LINE_FORMAT # The default value is: $file:$line: $text. WARN_FORMAT = "$file:$line: $text" # In the $text part of the WARN_FORMAT command it is possible that a reference # to a more specific place is given. To make it easier to jump to this place # (outside of doxygen) the user can define a custom "cut" / "paste" string. # Example: # WARN_LINE_FORMAT = "'vi $file +$line'" # See also: WARN_FORMAT # The default value is: at line $line of file $file. WARN_LINE_FORMAT = "at line $line of file $file" # The WARN_LOGFILE tag can be used to specify a file to which warning and error # messages should be written. If left blank the output is written to standard # error (stderr). In case the file specified cannot be opened for writing the # warning and error messages are written to standard error. When as file - is # specified the warning and error messages are written to standard output # (stdout). WARN_LOGFILE = #--------------------------------------------------------------------------- # Configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag is used to specify the files and/or directories that contain # documented source files. You may enter file names like myfile.cpp or # directories like /usr/src/myproject. Separate the files or directories with # spaces. See also FILE_PATTERNS and EXTENSION_MAPPING # Note: If this tag is empty the current directory is searched. 
INPUT = ../dolfinx \ ../demo # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses # libiconv (or the iconv built into libc) for the transcoding. See the libiconv # documentation (see: # https://www.gnu.org/software/libiconv/) for the list of possible encodings. # See also: INPUT_FILE_ENCODING # The default value is: UTF-8. INPUT_ENCODING = UTF-8 # This tag can be used to specify the character encoding of the source files # that doxygen parses The INPUT_FILE_ENCODING tag can be used to specify # character encoding on a per file pattern basis. Doxygen will compare the file # name with each pattern and apply the encoding instead of the default # INPUT_ENCODING) if there is a match. The character encodings are a list of the # form: pattern=encoding (like *.php=ISO-8859-1). See cfg_input_encoding # "INPUT_ENCODING" for further information on supported encodings. INPUT_FILE_ENCODING = # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and # *.h) to filter out the source-files in the directories. # # Note that for custom extensions or not directly supported extensions you also # need to set EXTENSION_MAPPING for the extension otherwise the files are not # read by doxygen. # # Note the list of default checked file patterns might differ from the list of # default file extension mappings. # # If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, # *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, # *.hh, *.hxx, *.hpp, *.h++, *.l, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, # *.inc, *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C # comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd, # *.vhdl, *.ucf, *.qsf and *.ice. 
FILE_PATTERNS = *.c \ *.cc \ *.cxx \ *.cpp \ *.c++ \ *.java \ *.ii \ *.ixx \ *.ipp \ *.i++ \ *.inl \ *.idl \ *.ddl \ *.odl \ *.h \ *.hh \ *.hxx \ *.hpp \ *.h++ \ *.cs \ *.d \ *.php \ *.php4 \ *.php5 \ *.phtml \ *.inc \ *.m \ *.markdown \ *.md \ *.mm \ *.dox \ *.py \ *.pyw \ *.f90 \ *.f95 \ *.f03 \ *.f08 \ *.f \ *.for \ *.tcl \ *.vhd \ *.vhdl \ *.ucf \ *.qsf # The RECURSIVE tag can be used to specify whether or not subdirectories should # be searched for input files as well. # The default value is: NO. RECURSIVE = YES # The EXCLUDE tag can be used to specify files and/or directories that should be # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. # # Note that relative paths are relative to the directory from which doxygen is # run. EXCLUDE = ../dolfinx/io/pugixml.hpp \ ../dolfinx/io/pugixml.cpp \ ../dolfinx/io/pugiconfig.hpp \ ../dolfinx/common/loguru.hpp \ ../dolfinx/common/loguru.cpp \ # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix file system feature) are excluded # from the input. # The default value is: NO. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. # # Note that the wildcards are matched against the file with absolute path, so to # exclude all test directories for example use the pattern */test/* EXCLUDE_PATTERNS = # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. 
Examples: ANamespace, AClass, # ANamespace::AClass, ANamespace::*Test # # Note that the wildcards are matched against the file with absolute path, so to # exclude all test directories use the pattern */test/* EXCLUDE_SYMBOLS = _* # The EXAMPLE_PATH tag can be used to specify one or more files or directories # that contain example code fragments that are included (see the \include # command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and # *.h) to filter out the source-files in the directories. If left blank all # files are included. EXAMPLE_PATTERNS = * # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude commands # irrespective of the value of the RECURSIVE tag. # The default value is: NO. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or directories # that contain images that are to be included in the documentation (see the # \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command: # # <filter> <input-file> # # where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the # name of an input file. Doxygen will then use the output that the filter # program writes to standard output. If FILTER_PATTERNS is specified, this tag # will be ignored. # # Note that the filter must not add or remove lines; it is applied before the # code is scanned, but not when the output code is generated. If lines are added # or removed, the anchors will not be placed correctly. 
# # Note that doxygen will use the data processed and written to standard output # for further processing, therefore nothing else, like debug statements or used # commands (so in case of a Windows batch file always use @echo OFF), should be # written to standard output. # # Note that for custom extensions or not directly supported extensions you also # need to set EXTENSION_MAPPING for the extension otherwise the files are not # properly processed by doxygen. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. Doxygen will compare the file name with each pattern and apply the # filter if there is a match. The filters are a list of the form: pattern=filter # (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how # filters are used. If the FILTER_PATTERNS tag is empty or if none of the # patterns match the file name, INPUT_FILTER is applied. # # Note that for custom extensions or not directly supported extensions you also # need to set EXTENSION_MAPPING for the extension otherwise the files are not # properly processed by doxygen. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will also be used to filter the input files that are used for # producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). # The default value is: NO. FILTER_SOURCE_FILES = NO # The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file # pattern. A pattern will override the setting for FILTER_PATTERN (if any) and # it is also possible to disable source filtering for a specific pattern using # *.ext= (so without naming a filter). # This tag requires that the tag FILTER_SOURCE_FILES is set to YES. FILTER_SOURCE_PATTERNS = # If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that # is part of the input, its contents will be placed on the main page # (index.html). 
This can be useful if you have a project on for instance GitHub # and want to reuse the introduction page also for the doxygen output. USE_MDFILE_AS_MAINPAGE = # The Fortran standard specifies that for fixed formatted Fortran code all # characters from position 72 are to be considered as comment. A common # extension is to allow longer lines before the automatic comment starts. The # setting FORTRAN_COMMENT_AFTER will also make it possible that longer lines can # be processed before the automatic comment starts. # Minimum value: 7, maximum value: 10000, default value: 72. FORTRAN_COMMENT_AFTER = 72 #--------------------------------------------------------------------------- # Configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will be # generated. Documented entities will be cross-referenced with these sources. # # Note: To get rid of all source code in the generated output, make sure that # also VERBATIM_HEADERS is set to NO. # The default value is: NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body of functions, # classes and enums directly into the documentation. # The default value is: NO. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any # special comment blocks from generated source code fragments. Normal C, C++ and # Fortran comments will always remain visible. # The default value is: YES. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES then for each documented # entity all documented functions referencing it will be listed. # The default value is: NO. REFERENCED_BY_RELATION = NO # If the REFERENCES_RELATION tag is set to YES then for each documented function # all documented entities called/used by that function will be listed. # The default value is: NO. 
REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set # to YES then the hyperlinks from functions in REFERENCES_RELATION and # REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will # link to the documentation. # The default value is: YES. REFERENCES_LINK_SOURCE = YES # If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the # source code will show a tooltip with additional information such as prototype, # brief description and links to the definition and documentation. Since this # will make the HTML file larger and loading of large files a bit slower, you # can opt to disable this feature. # The default value is: YES. # This tag requires that the tag SOURCE_BROWSER is set to YES. SOURCE_TOOLTIPS = YES # If the USE_HTAGS tag is set to YES then the references to source code will # point to the HTML generated by the htags(1) tool instead of doxygen built-in # source browser. The htags tool is part of GNU's global source tagging system # (see https://www.gnu.org/software/global/global.html). You will need version # 4.8.6 or higher. # # To use it do the following: # - Install the latest version of global # - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file # - Make sure the INPUT points to the root of the source tree # - Run doxygen as normal # # Doxygen will invoke htags (and that will in turn invoke gtags), so these # tools must be available from the command line (i.e. in the search path). # # The result: instead of the source browser generated by doxygen, the links to # source code will now point to the output of htags. # The default value is: NO. # This tag requires that the tag SOURCE_BROWSER is set to YES. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a # verbatim copy of the header file for each class for which an include is # specified. Set to NO to disable this. # See also: Section \class. 
# The default value is: YES. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # Configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all # compounds will be generated. Enable this if the project contains a lot of # classes, structs, unions or interfaces. # The default value is: YES. ALPHABETICAL_INDEX = YES # In case all classes in a project start with a common prefix, all classes will # be put under the same header in the alphabetical index. The IGNORE_PREFIX tag # can be used to specify a prefix (or a list of prefixes) that should be ignored # while generating the index headers. # This tag requires that the tag ALPHABETICAL_INDEX is set to YES. IGNORE_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output # The default value is: YES. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a # relative path is entered the value of OUTPUT_DIRECTORY will be put in front of # it. # The default directory is: html. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for each # generated HTML page (for example: .htm, .php, .asp). # The default value is: .html. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a user-defined HTML header file for # each generated HTML page. If the tag is left blank doxygen will generate a # standard header. 
# # To get valid HTML the header file that includes any scripts and style sheets # that doxygen needs, which is dependent on the configuration options used (e.g. # the setting GENERATE_TREEVIEW). It is highly recommended to start with a # default header using # doxygen -w html new_header.html new_footer.html new_stylesheet.css # YourConfigFile # and then modify the file new_header.html. See also section "Doxygen usage" # for information on how to generate the default header that doxygen normally # uses. # Note: The header is subject to change so you typically have to regenerate the # default header when upgrading to a newer version of doxygen. For a description # of the possible markers and block names see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_HEADER = # The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each # generated HTML page. If the tag is left blank doxygen will generate a standard # footer. See HTML_HEADER for more information on how to generate a default # footer and what special commands can be used inside the footer. See also # section "Doxygen usage" for information on how to generate the default footer # that doxygen normally uses. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading style # sheet that is used by each HTML page. It can be used to fine-tune the look of # the HTML output. If left blank doxygen will generate a default style sheet. # See also section "Doxygen usage" for information on how to generate the style # sheet that doxygen normally uses. # Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as # it is more robust and this tag (HTML_STYLESHEET) will in the future become # obsolete. # This tag requires that the tag GENERATE_HTML is set to YES. 
HTML_STYLESHEET = # The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined # cascading style sheets that are included after the standard style sheets # created by doxygen. Using this option one can overrule certain style aspects. # This is preferred over using HTML_STYLESHEET since it does not replace the # standard style sheet and is therefore more robust against future updates. # Doxygen will copy the style sheet files to the output directory. # Note: The order of the extra style sheet files is of importance (e.g. the last # style sheet in the list overrules the setting of the previous ones in the # list). For an example see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_EXTRA_STYLESHEET = # The HTML_EXTRA_FILES tag can be used to specify one or more extra images or # other source files which should be copied to the HTML output directory. Note # that these files will be copied to the base HTML output directory. Use the # $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these # files. In the HTML_STYLESHEET file, use the file name only. Also note that the # files will be copied as-is; there are no commands or markers available. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_EXTRA_FILES = # The HTML_COLORSTYLE tag can be used to specify if the generated HTML output # should be rendered with a dark or light theme. Default setting AUTO_LIGHT # enables light output unless the user preference is dark output. Other options # are DARK to always use dark mode, LIGHT to always use light mode, AUTO_DARK to # default to dark mode unless the user prefers light mode, and TOGGLE to let the # user toggle between dark and light mode via a button. 
# Possible values are: LIGHT Always generate light output., DARK Always generate # dark output., AUTO_LIGHT Automatically set the mode according to the user # preference, use light mode if no preference is set (the default)., AUTO_DARK # Automatically set the mode according to the user preference, use dark mode if # no preference is set. and TOGGLE Allow to user to switch between light and # dark mode via a button.. # The default value is: AUTO_LIGHT. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_COLORSTYLE = AUTO_LIGHT # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen # will adjust the colors in the style sheet and background images according to # this color. Hue is specified as an angle on a color-wheel, see # https://en.wikipedia.org/wiki/Hue for more information. For instance the value # 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 # purple, and 360 is red again. # Minimum value: 0, maximum value: 359, default value: 220. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_COLORSTYLE_HUE = 220 # The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors # in the HTML output. For a value of 0 the output will use gray-scales only. A # value of 255 will produce the most vivid colors. # Minimum value: 0, maximum value: 255, default value: 100. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_COLORSTYLE_SAT = 100 # The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the # luminance component of the colors in the HTML output. Values below 100 # gradually make the output lighter, whereas values above 100 make the output # darker. The value divided by 100 is the actual gamma applied, so 80 represents # a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not # change the gamma. # Minimum value: 40, maximum value: 240, default value: 80. # This tag requires that the tag GENERATE_HTML is set to YES. 
HTML_COLORSTYLE_GAMMA = 80 # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML # page will contain the date and time when the page was generated. Setting this # to YES can help to show when doxygen was last run and thus if the # documentation is up to date. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_TIMESTAMP = NO # If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML # documentation will contain a main index with vertical navigation menus that # are dynamically created via JavaScript. If disabled, the navigation index will # consists of multiple levels of tabs that are statically embedded in every HTML # page. Disable this option to support browsers that do not have JavaScript, # like the Qt help browser. # The default value is: YES. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_DYNAMIC_MENUS = YES # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_DYNAMIC_SECTIONS = NO # With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries # shown in the various tree structured indices initially; the user can expand # and collapse entries dynamically later on. Doxygen will expand the tree to # such a level that at most the specified number of entries are visible (unless # a fully collapsed tree already exceeds this amount). So setting the number of # entries 1 will produce a full collapsed tree by default. 0 is a special value # representing an infinite number of entries and will result in a full expanded # tree by default. # Minimum value: 0, maximum value: 9999, default value: 100. # This tag requires that the tag GENERATE_HTML is set to YES. 
HTML_INDEX_NUM_ENTRIES = 100 # If the GENERATE_DOCSET tag is set to YES, additional index files will be # generated that can be used as input for Apple's Xcode 3 integrated development # environment (see: # https://developer.apple.com/xcode/), introduced with OSX 10.5 (Leopard). To # create a documentation set, doxygen will generate a Makefile in the HTML # output directory. Running make will produce the docset in that directory and # running make install will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at # startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy # genXcode/_index.html for more information. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_DOCSET = NO # This tag determines the name of the docset feed. A documentation feed provides # an umbrella under which multiple documentation sets from a single provider # (such as a company or product suite) can be grouped. # The default value is: Doxygen generated docs. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_FEEDNAME = "Doxygen generated docs" # This tag determines the URL of the docset feed. A documentation feed provides # an umbrella under which multiple documentation sets from a single provider # (such as a company or product suite) can be grouped. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_FEEDURL = # This tag specifies a string that should uniquely identify the documentation # set bundle. This should be a reverse domain-name style string, e.g. # com.mycompany.MyDocSet. Doxygen will append .docset to the name. # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_BUNDLE_ID = org.doxygen.Project # The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify # the documentation publisher. This should be a reverse domain-name style # string, e.g. 
com.mycompany.MyDocSet.documentation. # The default value is: org.doxygen.Publisher. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_PUBLISHER_ID = org.doxygen.Publisher # The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. # The default value is: Publisher. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_PUBLISHER_NAME = Publisher # If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three # additional HTML index files: index.hhp, index.hhc, and index.hhk. The # index.hhp is a project file that can be read by Microsoft's HTML Help Workshop # on Windows. In the beginning of 2021 Microsoft took the original page, with # a.o. the download links, offline (the HTML help workshop was already many years # in maintenance mode). You can download the HTML help workshop from the web # archives at Installation executable (see: # http://web.archive.org/web/20160201063255/http://download.microsoft.com/downlo # ad/0/A/9/0A939EF6-E31C-430F-A3DF-DFAE7960D564/htmlhelp.exe). # # The HTML Help Workshop contains a compiler that can convert all HTML output # generated by doxygen into a single compiled HTML file (.chm). Compiled HTML # files are now used as the Windows 98 help format, and will replace the old # Windows help format (.hlp) on all Windows platforms in the future. Compressed # HTML files also contain an index, a table of contents, and you can search for # words in the documentation. The HTML workshop also contains a viewer for # compressed HTML files. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_HTMLHELP = NO # The CHM_FILE tag can be used to specify the file name of the resulting .chm # file. You can add a path in front of the file if the result should not be # written to the html output directory. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. 
CHM_FILE = # The HHC_LOCATION tag can be used to specify the location (absolute path # including file name) of the HTML help compiler (hhc.exe). If non-empty, # doxygen will try to run the HTML help compiler on the generated index.hhp. # The file has to be specified with full path. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. HHC_LOCATION = # The GENERATE_CHI flag controls if a separate .chi index file is generated # (YES) or that it should be included in the main .chm file (NO). # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. GENERATE_CHI = NO # The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) # and project file content. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. CHM_INDEX_ENCODING = # The BINARY_TOC flag controls whether a binary table of contents is generated # (YES) or a normal table of contents (NO) in the .chm file. Furthermore it # enables the Previous and Next buttons. # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members to # the table of contents of the HTML help documentation and to the tree view. # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. TOC_EXPAND = NO # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and # QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that # can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help # (.qch) of the generated HTML documentation. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify # the file name of the resulting .qch file. The path specified is relative to # the HTML output folder. 
# This tag requires that the tag GENERATE_QHP is set to YES. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help # Project output. For more information please see Qt Help Project / Namespace # (see: # https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace). # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_QHP is set to YES. QHP_NAMESPACE = org.doxygen.Project # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt # Help Project output. For more information please see Qt Help Project / Virtual # Folders (see: # https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-folders). # The default value is: doc. # This tag requires that the tag GENERATE_QHP is set to YES. QHP_VIRTUAL_FOLDER = doc # If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom # filter to add. For more information please see Qt Help Project / Custom # Filters (see: # https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_NAME = # The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the # custom filter to add. For more information please see Qt Help Project / Custom # Filters (see: # https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this # project's filter section matches. Qt Help Project / Filter Attributes (see: # https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_SECT_FILTER_ATTRS = # The QHG_LOCATION tag can be used to specify the location (absolute path # including file name) of Qt's qhelpgenerator. If non-empty doxygen will try to # run qhelpgenerator on the generated .qhp file. 
# This tag requires that the tag GENERATE_QHP is set to YES. QHG_LOCATION = # If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be # generated, together with the HTML files, they form an Eclipse help plugin. To # install this plugin and make it available under the help contents menu in # Eclipse, the contents of the directory containing the HTML and XML files needs # to be copied into the plugins directory of eclipse. The name of the directory # within the plugins directory should be the same as the ECLIPSE_DOC_ID value. # After copying Eclipse needs to be restarted before the help appears. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_ECLIPSEHELP = NO # A unique identifier for the Eclipse help plugin. When installing the plugin # the directory name containing the HTML and XML files should also have this # name. Each documentation set should have its own identifier. # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. ECLIPSE_DOC_ID = org.doxygen.Project # If you want full control over the layout of the generated HTML pages it might # be necessary to disable the index and replace it with your own. The # DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top # of each HTML page. A value of NO enables the index and the value YES disables # it. Since the tabs in the index contain the same information as the navigation # tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. DISABLE_INDEX = NO # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. If the tag # value is set to YES, a side panel will be generated containing a tree-like # index structure (just like the one that is generated for HTML Help). 
For this # to work a browser that supports JavaScript, DHTML, CSS and frames is required # (i.e. any modern browser). Windows users are probably better off using the # HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can # further fine tune the look of the index (see "Fine-tuning the output"). As an # example, the default style sheet generated by doxygen has an example that # shows how to put an image at the root of the tree instead of the PROJECT_NAME. # Since the tree basically has the same information as the tab index, you could # consider setting DISABLE_INDEX to YES when enabling this option. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_TREEVIEW = NO # When both GENERATE_TREEVIEW and DISABLE_INDEX are set to YES, then the # FULL_SIDEBAR option determines if the side bar is limited to only the treeview # area (value NO) or if it should extend to the full height of the window (value # YES). Setting this to YES gives a layout similar to # https://docs.readthedocs.io with more room for contents, but less room for the # project logo, title, and description. If either GENERATE_TREEVIEW or # DISABLE_INDEX is set to NO, this option has no effect. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. FULL_SIDEBAR = NO # The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that # doxygen will group on one line in the generated HTML documentation. # # Note that a value of 0 will completely suppress the enum values from appearing # in the overview section. # Minimum value: 0, maximum value: 20, default value: 4. # This tag requires that the tag GENERATE_HTML is set to YES. ENUM_VALUES_PER_LINE = 4 # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used # to set the initial width (in pixels) of the frame in which the tree is shown. # Minimum value: 0, maximum value: 1500, default value: 250. 
# This tag requires that the tag GENERATE_HTML is set to YES. TREEVIEW_WIDTH = 250 # If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to # external symbols imported via tag files in a separate window. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. EXT_LINKS_IN_WINDOW = NO # If the OBFUSCATE_EMAILS tag is set to YES, doxygen will obfuscate email # addresses. # The default value is: YES. # This tag requires that the tag GENERATE_HTML is set to YES. OBFUSCATE_EMAILS = YES # If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg # tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see # https://inkscape.org) to generate formulas as SVG images instead of PNGs for # the HTML output. These images will generally look nicer at scaled resolutions. # Possible values are: png (the default) and svg (looks nicer but requires the # pdf2svg or inkscape tool). # The default value is: png. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_FORMULA_FORMAT = png # Use this tag to change the font size of LaTeX formulas included as images in # the HTML documentation. When you change the font size after a successful # doxygen run you need to manually remove any form_*.png images from the HTML # output directory to force them to be regenerated. # Minimum value: 8, maximum value: 50, default value: 10. # This tag requires that the tag GENERATE_HTML is set to YES. FORMULA_FONTSIZE = 10 # The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands # to create new LaTeX commands to be used in formulas as building blocks. See # the section "Including formulas" for details. FORMULA_MACROFILE = # Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see # https://www.mathjax.org) which uses client side JavaScript for the rendering # instead of using pre-rendered bitmaps. 
Use this if you do not have LaTeX # installed or if you want to formulas look prettier in the HTML output. When # enabled you may also need to install MathJax separately and configure the path # to it using the MATHJAX_RELPATH option. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. USE_MATHJAX = YES # With MATHJAX_VERSION it is possible to specify the MathJax version to be used. # Note that the different versions of MathJax have different requirements with # regards to the different settings, so it is possible that also other MathJax # settings have to be changed when switching between the different MathJax # versions. # Possible values are: MathJax_2 and MathJax_3. # The default value is: MathJax_2. # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_VERSION = MathJax_2 # When MathJax is enabled you can set the default output format to be used for # the MathJax output. For more details about the output format see MathJax # version 2 (see: # http://docs.mathjax.org/en/v2.7-latest/output.html) and MathJax version 3 # (see: # http://docs.mathjax.org/en/latest/web/components/output.html). # Possible values are: HTML-CSS (which is slower, but has the best # compatibility. This is the name for Mathjax version 2, for MathJax version 3 # this will be translated into chtml), NativeMML (i.e. MathML. Only supported # for NathJax 2. For MathJax version 3 chtml will be used instead.), chtml (This # is the name for Mathjax version 3, for MathJax version 2 this will be # translated into HTML-CSS) and SVG. # The default value is: HTML-CSS. # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_FORMAT = HTML-CSS # When MathJax is enabled you need to specify the location relative to the HTML # output directory using the MATHJAX_RELPATH option. The destination directory # should contain the MathJax.js script. 
For instance, if the mathjax directory # is located at the same level as the HTML output directory, then # MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax # Content Delivery Network so you can quickly see the result without installing # MathJax. However, it is strongly recommended to install a local copy of # MathJax from https://www.mathjax.org before deployment. The default value is: # - in case of MathJax version 2: https://cdn.jsdelivr.net/npm/mathjax@2 # - in case of MathJax version 3: https://cdn.jsdelivr.net/npm/mathjax@3 # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_RELPATH = https://cdn.mathjax.org/mathjax/latest # The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax # extension names that should be enabled during MathJax rendering. For example # for MathJax version 2 (see # https://docs.mathjax.org/en/v2.7-latest/tex.html#tex-and-latex-extensions): # MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols # For example for MathJax version 3 (see # http://docs.mathjax.org/en/latest/input/tex/extensions/index.html): # MATHJAX_EXTENSIONS = ams # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_EXTENSIONS = # The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces # of code that will be used on startup of the MathJax code. See the MathJax site # (see: # http://docs.mathjax.org/en/v2.7-latest/output.html) for more details. For an # example see the documentation. # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_CODEFILE = # When the SEARCHENGINE tag is enabled doxygen will generate a search box for # the HTML output. The underlying search engine uses javascript and DHTML and # should work on any modern browser. Note that when using HTML help # (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) # there is already a search function so this one should typically be disabled. 
# For large projects the javascript based search engine can be slow, then # enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to # search using the keyboard; to jump to the search box use + S # (what the is depends on the OS and browser, but it is typically # , /