basix-0.3.0/.clang-format
---
Language: Cpp
# BasedOnStyle: LLVM
AccessModifierOffset: -2
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
AlignEscapedNewlinesLeft: false
AlignOperands: true
AlignTrailingComments: true
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortBlocksOnASingleLine: false
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: All
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: false
AlwaysBreakTemplateDeclarations: true
BinPackArguments: true
BinPackParameters: true
BraceWrapping:
  AfterClass: false
  AfterControlStatement: false
  AfterEnum: false
  AfterFunction: false
  AfterNamespace: false
  AfterObjCDeclaration: false
  AfterStruct: false
  AfterUnion: false
  BeforeCatch: false
  BeforeElse: false
  IndentBraces: false
BreakBeforeBinaryOperators: All
BreakBeforeBraces: Allman
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: false
BreakAfterJavaFieldAnnotations: false
BreakStringLiterals: true
ColumnLimit: 80
CommentPragmas: '^ IWYU pragma:'
ConstructorInitializerAllOnOneLineOrOnePerLine: false
ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 4
Cpp11BracedListStyle: true
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ]
IncludeCategories:
  - Regex: '^"(llvm|llvm-c|clang|clang-c)/'
    Priority: 2
  - Regex: '^(<|"(gtest|isl|json)/)'
    Priority: 3
  - Regex: '.*'
    Priority: 1
IncludeIsMainRegex: '$'
IndentCaseLabels: false
IndentWidth: 2
IndentWrappedFunctionNames: false
JavaScriptQuotes: Leave
JavaScriptWrapImports: true
KeepEmptyLinesAtTheStartOfBlocks: true
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCBlockIndentWidth: 2
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: true
PenaltyBreakBeforeFirstCallParameter: 19
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 60
PointerAlignment: Left
ReflowComments: true
SortIncludes: true
SpaceAfterCStyleCast: false
SpaceAfterTemplateKeyword: true
SpaceBeforeAssignmentOperators: true
SpaceBeforeParens: ControlStatements
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 1
SpacesInAngles: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: Cpp11
TabWidth: 8
UseTab: Never
...

basix-0.3.0/.github/workflows/dolfin-tests.yml
# This workflow will install Basix, FFCx, DOLFINx and run the DOLFINx unit tests.
name: DOLFINx integration on: pull_request jobs: build: name: Run DOLFINx tests runs-on: ubuntu-latest container: fenicsproject/test-env:latest-openmpi env: CC: clang-12 CXX: clang++-12 PETSC_ARCH: linux-gnu-complex-32 OMPI_ALLOW_RUN_AS_ROOT: 1 OMPI_ALLOW_RUN_AS_ROOT_CONFIRM: 1 OMPI_MCA_rmaps_base_oversubscribe: 1 OMPI_MCA_plm: isolated OMPI_MCA_btl_vader_single_copy_mechanism: none OMPI_MCA_mpi_yield_when_idle: 1 OMPI_MCA_hwloc_base_binding_policy: none steps: - uses: actions/checkout@v2 - name: Install basix run: | cmake -G Ninja -DCMAKE_BUILD_TYPE=Developer -B build-dir -S . cmake --build build-dir cmake --install build-dir python3 -m pip install ./python - name: Install FEniCS Python components run: | python3 -m pip install git+https://github.com/FEniCS/ufl.git python3 -m pip install git+https://github.com/FEniCS/ffcx.git - name: Get DOLFINx uses: actions/checkout@v2 with: path: ./dolfinx repository: FEniCS/dolfinx ref: main - name: Install DOLFINx run: | cmake -G Ninja -DCMAKE_BUILD_TYPE=Developer -B build -S dolfinx/cpp/ cmake --build build cmake --install build python3 -m pip -v install --global-option build --global-option --debug dolfinx/python/ - name: Run DOLFINx unit tests run: python3 -m pytest -v -n auto dolfinx/python/test/unit basix-0.3.0/.github/workflows/intel.yml000066400000000000000000000037611411115224000200510ustar00rootroot00000000000000name: Intel compilers on: schedule: # '*' is a special character in YAML, so string must be quoted - cron: "0 2 * * TUE" workflow_dispatch: ~ jobs: build: name: Build and test runs-on: ubuntu-20.04 defaults: run: shell: bash strategy: matrix: include: - compiler: "Classic" c_compiler: icc cxx_compiler: icpc - compiler: "LLVM-based" c_compiler: icx cxx_compiler: icpx env: CC: ${{ matrix.c_compiler }} CXX: ${{ matrix.cxx_compiler }} DEBIAN_FRONTEND: noninteractive steps: - uses: actions/checkout@v2 - name: Set up Python uses: actions/setup-python@v2 with: python-version: 3.8 - name: Install pybind11 run: | pip install pybind11 - name: Install dependencies (non-Python, Linux) run: | sudo apt-get install -y ninja-build - name: Install Intel compilers run: | wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB sudo apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB echo "deb https://apt.repos.intel.com/oneapi all main" | sudo tee /etc/apt/sources.list.d/oneAPI.list sudo apt update sudo apt install -y intel-oneapi-common-vars intel-oneapi-compiler-dpcpp-cpp-and-cpp-classic - name: Install Basix C++ run: | . /opt/intel/oneapi/setvars.sh cmake -G Ninja -DCMAKE_BUILD_TYPE=Developer -B build-dir -S . cmake --build build-dir sudo cmake --install build-dir - name: Build Basix Python run: | . /opt/intel/oneapi/setvars.sh cd python pip install . - name: Run units tests run: | . 
/opt/intel/oneapi/setvars.sh pip install pytest pytest-xdist numpy sympy numba scipy pytest -v -n auto --durations 20 test/ basix-0.3.0/.github/workflows/pythonapp.yml000066400000000000000000000107641411115224000207610ustar00rootroot00000000000000# This workflow will install Python dependencies, run tests and lint # with a single version of Python For more information see: # https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions name: Basix CI on: push: branches: - "**" tags: - "*.*.*.*" - "*.*.*" pull_request: branches: - main jobs: build: name: Build and test runs-on: ${{ matrix.os }} strategy: matrix: os: [ubuntu-20.04] python-version: [3.8] steps: - uses: actions/checkout@v2 - name: Set up Python uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} - name: Install pybind11 run: pip install pybind11 - name: Install dependencies (non-Python, Linux) if: runner.os == 'Linux' run: | sudo apt-get install -y doxygen libopenblas-dev liblapack-dev ninja-build - name: Install dependencies (non-Python, macOS) if: runner.os == 'macOS' run: brew install ninja - name: Lint with flake8 run: | pip install flake8 flake8 --statistics test/ flake8 --statistics python/ - name: pydocstyle checks run: | pip install pydocstyle python -m pydocstyle python/basix - name: Install Basix C++ run: | cmake -G Ninja -DCMAKE_BUILD_TYPE=Developer -B build-dir -S . cmake --build build-dir sudo cmake --install build-dir - name: Build Basix Python run: | cd python pip install . - name: Run units tests run: | pip install pytest pytest-xdist numpy sympy numba scipy pytest -v -n auto --durations 20 test/ - name: Build documentation if: runner.os == 'Linux' run: | pip install markdown pip install sphinx sphinx_rtd_theme export BASIX_VERSION=`python3 -c "import basix; print(basix.__version__)"` cd doc && python make_html.py - name: Upload C++ documentation artifact if: runner.os == 'Linux' uses: actions/upload-artifact@v2 with: name: doc-cpp path: | doc/html/cpp retention-days: 2 if-no-files-found: error - name: Upload Python documentation artifact if: runner.os == 'Linux' uses: actions/upload-artifact@v2 with: name: doc-python path: | doc/html/python retention-days: 2 if-no-files-found: error - name: Set version name if: ${{ github.repository == 'FEniCS/basix' && ( github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/') ) && runner.os == 'Linux' }} run: | echo "VERSION_NAME=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV - name: Build documentation to upload if: ${{ github.repository == 'FEniCS/basix' && ( github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/') ) && runner.os == 'Linux' }} run: | export BASIX_VERSION=`python3 -c "import basix; print(basix.__version__)"` cd doc && python make_html.py --url https://docs.fenicsproject.org/basix/${{ env.VERSION_NAME }} - name: Checkout FEniCS/docs if: ${{ github.repository == 'FEniCS/basix' && ( github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/') ) && runner.os == 'Linux' }} uses: actions/checkout@v2 with: repository: "FEniCS/docs" path: "docs" ssh-key: "${{ secrets.SSH_GITHUB_DOCS_PRIVATE_KEY }}" - name: Copy documentation into repository if: ${{ github.repository == 'FEniCS/basix' && ( github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/') ) && runner.os == 'Linux' }} run: | cd docs git rm -r --ignore-unmatch basix/${{ env.VERSION_NAME }} mkdir -p basix/${{ env.VERSION_NAME }} cp -r ../doc/html/* basix/${{ env.VERSION_NAME }} - name: Commit and push 
documentation to FEniCS/docs if: ${{ github.repository == 'FEniCS/basix' && ( github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/') ) && runner.os == 'Linux' }} run: | cd docs git config --global user.email "fenics@github.com" git config --global user.name "FEniCS GitHub Actions" git add --all git commit --allow-empty -m "Update Basix docs FEniCS/basix@${{ github.sha }}" git push basix-0.3.0/.gitignore000066400000000000000000000005251411115224000146010ustar00rootroot00000000000000*.pyc *~ *.so cpp/Makefile cpp/CMakeCache.txt cpp/CMakeFiles/* cpp/cmake_install.cmake cpp/basix/version.h doc/python/build/* doc/python/source/generated/* python/testing_build/ doc/cpp/html/* doc/cpp/latex/* doc/html/* doc/_temp doc/python/source/_templates/layout.html doc/cpp/header.html build*/ .*.swp *.egg-info _skbuild/* dist/* basix-0.3.0/BasixConfig.cmake.in000066400000000000000000000003251411115224000164120ustar00rootroot00000000000000@PACKAGE_INIT@ include(CMakeFindDependencyMacro) # xtensor find_dependency(xtl) find_dependency(xtensor) if(NOT TARGET @PROJECT_NAME@) include("${CMAKE_CURRENT_LIST_DIR}/@PROJECT_NAME@Targets.cmake") endif() basix-0.3.0/CMakeLists.txt000066400000000000000000000213301411115224000153460ustar00rootroot00000000000000cmake_minimum_required(VERSION 3.16) # Set the version project(Basix VERSION "0.3.0" LANGUAGES CXX) include(GNUInstallDirs) include(FeatureSummary) # Enable SIMD with xtensor option(XTENSOR_USE_XSIMD "Enable SIMD with xtensor" OFF) add_feature_info(XTENSOR_USE_XSIMD XTENSOR_USE_XSIMD "Enable SIMD with xtensor.") # Enable xtensor with target-specific optimization, i.e. -march=native option(XTENSOR_OPTIMIZE "Enable xtensor target-specific optimization" OFF) add_feature_info(XTENSOR_OPTIMIZE XTENSOR_OPTIMIZE "Enable architecture-specific optimizations as defined by xtensor.") # Set the C++ standard set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) # Options option(BUILD_SHARED_LIBS "Build Basix with shared libraries." 
ON) add_feature_info(BUILD_SHARED_LIBS BUILD_SHARED_LIBS "Build Basix with shared libraries.") # Find dependecies set(XTENSOR_MIN_VERSION 0.23.10) set(XTL_MIN_VERSION 0.7.0) set(XSIMD_MIN_VERSION 7.4.10) include(FetchContent) # xsimd (optional) if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.19) find_package(xsimd ${XSIMD_MIN_VERSION}...<8.0 QUIET) else() find_package(xsimd ${XSIMD_MIN_VERSION} QUIET) endif() if(XTENSOR_USE_XSIMD AND NOT xsimd_FOUND) message("downloading xsimd source...") FetchContent_Declare( xsimd GIT_REPOSITORY https://github.com/xtensor-stack/xsimd.git GIT_TAG 7.4.10 ) FetchContent_MakeAvailable(xsimd) else() message("found xsimd ${xsimd_VERSION}") endif() # xtl, xtensor, xtensor-blas (required) if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.19) find_package(xtl ${XTL_MIN_VERSION}...<0.8 QUIET) find_package(xtensor ${XTENSOR_MIN_VERSION}...<0.24 QUIET) else() find_package(xtl ${XTL_MIN_VERSION} QUIET) find_package(xtensor ${XTENSOR_MIN_VERSION} QUIET) endif() find_package(xtensor-blas 0.19 QUIET) if(NOT xtl_FOUND) message("downloading xtl source...") FetchContent_Declare( xtl GIT_REPOSITORY https://github.com/xtensor-stack/xtl.git GIT_TAG 0.7.2 ) FetchContent_MakeAvailable(xtl) else() message("found xtl ${xtl_VERSION}") endif() if(NOT xtensor_FOUND) message("downloading xtensor source...") FetchContent_Declare( xtensor GIT_REPOSITORY https://github.com/xtensor-stack/xtensor.git GIT_TAG 0.23.10 ) FetchContent_MakeAvailable(xtensor) else() message("found xtensor ${xtensor_VERSION}") endif() if(NOT xtensor-blas_FOUND) message("downloading xtensor-blas source...") FetchContent_Declare( xtensor_blas GIT_REPOSITORY https://github.com/xtensor-stack/xtensor-blas.git GIT_TAG 0.19.1 ) FetchContent_MakeAvailable(xtensor_blas) else() message("found xtensor-blas ${xtensor-blas_VERSION}") endif() find_package(BLAS REQUIRED) find_package(LAPACK REQUIRED) feature_summary(WHAT ALL) # --Source files add_library(basix) configure_file(${CMAKE_SOURCE_DIR}/cpp/basix/version.h.in ${CMAKE_SOURCE_DIR}/cpp/basix/version.h) set(HEADERS_basix ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/cell.h ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/dof-transformations.h ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/element-families.h ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/finite-element.h ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/indexing.h ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/lattice.h ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/log.h ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/maps.h ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/moments.h ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/polyset.h ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/precompute.h ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/quadrature.h ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/lagrange.h ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/nce-rtc.h ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/brezzi-douglas-marini.h ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/nedelec.h ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/raviart-thomas.h ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/regge.h ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/crouzeix-raviart.h ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/bubble.h ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/serendipity.h ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/version.h) target_sources(basix PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/cell.cpp ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/dof-transformations.cpp ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/element-families.cpp ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/finite-element.cpp ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/lattice.cpp ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/log.cpp 
${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/maps.cpp ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/moments.cpp ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/polyset.cpp ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/precompute.cpp ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/quadrature.cpp ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/lagrange.cpp ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/nce-rtc.cpp ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/brezzi-douglas-marini.cpp ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/nedelec.cpp ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/raviart-thomas.cpp ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/regge.cpp ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/crouzeix-raviart.cpp ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/bubble.cpp ${CMAKE_CURRENT_SOURCE_DIR}/cpp/basix/serendipity.cpp) # Configure the library set_target_properties(basix PROPERTIES PUBLIC_HEADER cpp/basix/finite-element.h) set_target_properties(basix PROPERTIES PRIVATE_HEADER "${HEADERS_basix}") target_include_directories(basix PUBLIC $ "$") # The commented lines need CMake >= 3.18 # target_link_libraries(basix PRIVATE BLAS::BLAS) # target_link_libraries(basix PRIVATE LAPACK::LAPACK) target_link_libraries(basix PRIVATE ${BLAS_LIBRARIES}) target_link_libraries(basix PRIVATE ${LAPACK_LIBRARIES}) # xtensor and related packages target_link_libraries(basix PUBLIC xtl) # Note: we use get_target_property/set_target_properties to ensure that # that -isystem flag is applied to allow us to use strict compiler flags get_target_property(XTENSOR_INC_SYS xtensor INTERFACE_INCLUDE_DIRECTORIES) set_target_properties(xtensor PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${XTENSOR_INC_SYS}") target_link_libraries(basix PUBLIC xtensor) # Add xsimd definition to Basix so that Basix will export this option so # that other libraries will know that Basix was compiled with xsimd get_target_property(XTENSOR_DEFN xtensor INTERFACE_COMPILE_DEFINITIONS) if("XTENSOR_USE_XSIMD" IN_LIST XTENSOR_DEFN) target_compile_definitions(basix PUBLIC XTENSOR_USE_XSIMD) elseif(TARGET xsimd AND XTENSOR_USE_XSIMD) target_compile_definitions(basix PUBLIC XTENSOR_USE_XSIMD) target_link_libraries(basix PRIVATE xsimd) endif() # Handle -march=native if (XTENSOR_OPTIMIZE AND TARGET xtensor::optimize) target_link_libraries(basix PUBLIC xtensor::optimize) elseif(XTENSOR_OPTIMIZE) target_compile_options(basix PUBLIC -march=native) endif() # Note: we use get_target_property/set_target_properties to ensure that # that -isystem flag is applied to allow us to use strict compiler flags get_target_property(XTENSORBLAS_INC_SYS xtensor-blas INTERFACE_INCLUDE_DIRECTORIES) set_target_properties(xtensor-blas PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${XTENSORBLAS_INC_SYS}") target_link_libraries(basix PRIVATE xtensor-blas) # Set compiler flags list(APPEND BASIX_DEVELOPER_FLAGS -O2;-g;-pipe) list(APPEND basix_compiler_flags -Wall;-Werror;-Wextra;-Wno-comment;-pedantic;) target_compile_options(basix PRIVATE "$<$,$>:${basix_compiler_flags}>") target_compile_options(basix PRIVATE $<$:${BASIX_DEVELOPER_FLAGS}>) # Set debug definitions (private) target_compile_definitions(basix PRIVATE $<$,$>:DEBUG>) # Install the Basix library install(TARGETS basix EXPORT BasixTargets PUBLIC_HEADER DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} PRIVATE_HEADER DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/basix RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT RuntimeExecutables LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT RuntimeLibraries ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT Development) # Install CMake helpers 
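# These helper files let a downstream CMake project locate the installed
# library with find_package. A hypothetical consumer (illustrative sketch
# only, not part of this build) would use the imported target defined by the
# EXPORT/NAMESPACE settings below:
#
#   find_package(Basix REQUIRED)
#   add_executable(app main.cpp)
#   target_link_libraries(app PRIVATE Basix::basix)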
include(CMakePackageConfigHelpers) write_basic_package_version_file(BasixConfigVersion.cmake VERSION ${PACKAGE_VERSION} COMPATIBILITY AnyNewerVersion) configure_package_config_file(BasixConfig.cmake.in ${CMAKE_BINARY_DIR}/BasixConfig.cmake INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/basix) install(FILES ${CMAKE_BINARY_DIR}/BasixConfig.cmake ${CMAKE_BINARY_DIR}/BasixConfigVersion.cmake DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/basix COMPONENT Development) install(EXPORT BasixTargets FILE BasixTargets.cmake NAMESPACE Basix:: DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/basix) basix-0.3.0/CODE_OF_CONDUCT.md000066400000000000000000000072501411115224000154120ustar00rootroot00000000000000Code of Conduct =============== Our Pledge ---------- In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. Our Standards ------------- Examples of behavior that contributes to creating a positive environment include: * Using welcoming and inclusive language * Being respectful of differing viewpoints and experiences * Gracefully accepting constructive criticism * Focusing on what is best for the community * Showing empathy towards other community members Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or advances * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others’ private information, such as a physical or electronic address, without explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting Our Responsibilities -------------------- Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. Scope ----- This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. Enforcement ----------- Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at fenics-steering-council@googlegroups.com. Alternatively, you may report individually to one of the members of the Steering Council. Complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. 
The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project’s leadership.

If you feel that your report has not been followed up satisfactorily, then you may contact our parent organisation NumFOCUS at info@numfocus.org for further redress.

Attribution
-----------

This Code of Conduct is adapted from the Contributor Covenant, version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html.

Adaptations
-----------

* Allow reporting to individual Steering Council members
* Added the option to contact NumFOCUS for further redress.

For answers to common questions about this code of conduct, see https://www.contributor-covenant.org/faq

basix-0.3.0/INSTALL.md
# Installation instructions

## Installing the Basix C++ library

To install Basix, you must first install the C++ interface and library:

```bash
cmake -DCMAKE_BUILD_TYPE=Release -B build-dir -S .
cmake --build build-dir
cmake --install build-dir
```

You may need to use `sudo` for the final install step. Using the CMake build type `Release` is strongly recommended for performance.

## Installing the Python interface

To use Basix via Python, you will need to install the Basix Python interface. First, you will need to install pybind11: `pip install pybind11`. You can then install the Basix Python interface with:

```bash
cd python
pip install .
```

## Running the Basix tests

Once you have installed the Basix Python interface, you can run the Basix tests to check that everything is running correctly. First, install pytest: `pip install pytest`. You can then run the tests with:

```bash
pytest test/
```

## Dependencies

Basix depends on [`xtensor`](https://github.com/xtensor-stack/xtensor) and [`xtensor-blas`](https://github.com/xtensor-stack/xtensor-blas). CMake will download and install these packages if they cannot be found. Building the Python interface requires [`pybind11`](https://github.com/pybind/pybind11).

basix-0.3.0/LICENSE
MIT License

Copyright (c) 2020 FEniCS Project

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
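For a quick check that the Python interface described in INSTALL.md above is importable, the version query used by the project's CI workflow can be run directly. This is a minimal sketch; it assumes the `basix` module has been installed as described above:

```python
# Minimal smoke test: import the installed Basix Python module and print
# its version (the same check used in the CI workflow above).
import basix

print(basix.__version__)
```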
basix-0.3.0/README.md000066400000000000000000000105371411115224000140740ustar00rootroot00000000000000# Basix ![Basix CI](https://github.com/FEniCS/basix/workflows/Basix%20CI/badge.svg) Basix is a finite element definition and tabulation runtime library. ## Installing Basix Basix can be installed by following [these instructions](INSTALL.md) ## Documentation Documentation of Basix can be found at https://docs.fenicsproject.org/basix/main/. ## Supported elements ### Interval In Basix, the sub-entities of the reference interval are numbered as follows: ![The numbering of a reference interval](img/interval_numbering.png) The following elements are supported on a interval: - [Lagrange](https://defelement.com/elements/lagrange.html) - [Bubble](https://defelement.com/elements/bubble.html) - [DPC](https://defelement.com/elements/dpc.html) - [Serendipity](https://defelement.com/elements/serendipity.html) ### Triangle In Basix, the sub-entities of the reference triangle are numbered as follows: ![The numbering of a reference triangle](img/triangle_numbering.png) The following elements are supported on a triangle: - [Lagrange](https://defelement.com/elements/lagrange.html) - [Nédélec first kind](https://defelement.com/elements/nedelec1.html) - [Raviart-Thomas](https://defelement.com/elements/raviart-thomas.html) - [Nédélec second kind](https://defelement.com/elements/nedelec2.html) - [Brezzi-Douglas-Marini](https://defelement.com/elements/brezzi-douglas-marini.html) - [Regge](https://defelement.com/elements/regge.html) - [Crouzeix-Raviart](https://defelement.com/elements/crouzeix-raviart.html) - [Bubble](https://defelement.com/elements/bubble.html) ### Quadrilateral In Basix, the sub-entities of the reference quadrilateral are numbered as follows: ![The numbering of a reference quadrilateral](img/quadrilateral_numbering.png) The following elements are supported on a quadrilateral: - [Lagrange](https://defelement.com/elements/lagrange.html) - [Nédélec first kind](https://defelement.com/elements/nedelec1.html) - [Raviart-Thomas](https://defelement.com/elements/qdiv.html) - [Nédélec second kind](https://defelement.com/elements/scurl.html) - [Brezzi-Douglas-Marini](https://defelement.com/elements/sdiv.html) - [Bubble](https://defelement.com/elements/bubble.html) - [DPC](https://defelement.com/elements/dpc.html) - [Serendipity](https://defelement.com/elements/serendipity.html) ### Tetrahedron In Basix, the sub-entities of the reference tetrahedron are numbered as follows: ![The numbering of a reference tetrahedron](img/tetrahedron_numbering.png) The following elements are supported on a tetrahedron: - [Lagrange](https://defelement.com/elements/lagrange.html) - [Nédélec first kind](https://defelement.com/elements/nedelec1.html) - [Raviart-Thomas](https://defelement.com/elements/raviart-thomas.html) - [Nédélec second kind](https://defelement.com/elements/nedelec2.html) - [Brezzi-Douglas-Marini](https://defelement.com/elements/brezzi-douglas-marini.html) - [Regge](https://defelement.com/elements/regge.html) - [Crouzeix-Raviart](https://defelement.com/elements/crouzeix-raviart.html) - [Bubble](https://defelement.com/elements/bubble.html) ### Hexahedron In Basix, the sub-entities of the reference hexahedron are numbered as follows: ![The numbering of a reference hexahedron](img/hexahedron_numbering.png) The following elements are supported on a hexahedron: - [Lagrange](https://defelement.com/elements/lagrange.html) - [Nédélec first kind](https://defelement.com/elements/nedelec1.html) - 
[Raviart-Thomas](https://defelement.com/elements/qdiv.html) - [Nédélec second kind](https://defelement.com/elements/scurl.html) - [Brezzi-Douglas-Marini](https://defelement.com/elements/sdiv.html) - [Bubble](https://defelement.com/elements/bubble.html) - [DPC](https://defelement.com/elements/dpc.html) - [Serendipity](https://defelement.com/elements/serendipity.html) ### Prism In Basix, the sub-entities of the reference prism are numbered as follows: ![The numbering of a reference prism](img/prism_numbering.png) The following elements are supported on a prism: - [Lagrange](https://defelement.com/elements/lagrange.html) ### Pyramid In Basix, the sub-entities of the reference pyramid are numbered as follows: ![The numbering of a reference pyramid](img/pyramid_numbering.png) The following elements are supported on a pyramid: - [Lagrange](https://defelement.com/elements/lagrange.html) basix-0.3.0/cpp/000077500000000000000000000000001411115224000133715ustar00rootroot00000000000000basix-0.3.0/cpp/basix/000077500000000000000000000000001411115224000144775ustar00rootroot00000000000000basix-0.3.0/cpp/basix/brezzi-douglas-marini.cpp000066400000000000000000000054441411115224000214300ustar00rootroot00000000000000// Copyright (c) 2020 Chris Richardson & Matthew Scroggs // FEniCS Project // SPDX-License-Identifier: MIT #include "brezzi-douglas-marini.h" #include "element-families.h" #include "lagrange.h" #include "maps.h" #include "moments.h" #include "nedelec.h" #include "polyset.h" #include "quadrature.h" #include #include #include #include #include #include using namespace basix; //---------------------------------------------------------------------------- FiniteElement basix::create_bdm(cell::type celltype, int degree) { if (celltype != cell::type::triangle and celltype != cell::type::tetrahedron) throw std::runtime_error("Unsupported cell type"); const std::size_t tdim = cell::topological_dimension(celltype); const cell::type facettype = sub_entity_type(celltype, tdim - 1, 0); // The number of order (degree) scalar polynomials const std::size_t ndofs = tdim * polyset::dim(celltype, degree); // quadrature degree int quad_deg = 5 * degree; std::array>, 4> M; std::array>, 4> x; // Add integral moments on facets const FiniteElement facet_moment_space = create_dlagrange(facettype, degree); std::tie(x[tdim - 1], M[tdim - 1]) = moments::make_normal_integral_moments( facet_moment_space, celltype, tdim, quad_deg); const xt::xtensor facet_transforms = moments::create_normal_moment_dof_transformations(facet_moment_space); // Add integral moments on interior if (degree > 1) { // Interior integral moment std::tie(x[tdim], M[tdim]) = moments::make_dot_integral_moments( create_nedelec(celltype, degree - 1), celltype, tdim, quad_deg); } const std::vector>> topology = cell::topology(celltype); std::map> entity_transformations; switch (tdim) { case 2: entity_transformations[cell::type::interval] = facet_transforms; break; case 3: entity_transformations[cell::type::interval] = xt::xtensor({1, 0, 0}); entity_transformations[cell::type::triangle] = facet_transforms; break; default: throw std::runtime_error("Invalid topological dimension."); } // Create coefficients for order (degree-1) vector polynomials xt::xtensor coeffs = compute_expansion_coefficients( celltype, xt::eye(ndofs), {M[tdim - 1], M[tdim]}, {x[tdim - 1], x[tdim]}, degree); return FiniteElement(element::family::BDM, celltype, degree, {tdim}, coeffs, entity_transformations, x, M, maps::type::contravariantPiola); } 
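// Hypothetical usage sketch (not part of the library source): the factory
// above is typically invoked as, e.g.,
//   basix::FiniteElement bdm = basix::create_bdm(basix::cell::type::triangle, 2);
// which assembles the degree-2 BDM element on a triangle from the facet
// normal moments and (for degree > 1) the interior Nedelec moments built above.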
//----------------------------------------------------------------------------- basix-0.3.0/cpp/basix/brezzi-douglas-marini.h000066400000000000000000000004721411115224000210710ustar00rootroot00000000000000// Copyright (c) 2020 Chris Richardson & Matthew Scroggs // FEniCS Project // SPDX-License-Identifier: MIT #pragma once #include "finite-element.h" namespace basix { /// Create BDM element /// @param celltype /// @param degree FiniteElement create_bdm(cell::type celltype, int degree); } // namespace basix basix-0.3.0/cpp/basix/bubble.cpp000066400000000000000000000125351411115224000164440ustar00rootroot00000000000000// Copyright (c) 2020 Chris Richardson & Matthew Scroggs // FEniCS Project // SPDX-License-Identifier: MIT #include "bubble.h" #include "element-families.h" #include "lattice.h" #include "maps.h" #include "polyset.h" #include "quadrature.h" #include #include #include #include #include #include #include using namespace basix; //---------------------------------------------------------------------------- FiniteElement basix::create_bubble(cell::type celltype, int degree) { switch (celltype) { case cell::type::interval: if (degree < 2) throw std::runtime_error( "Bubble element on an interval must have degree at least 2"); break; case cell::type::triangle: if (degree < 3) throw std::runtime_error( "Bubble element on a triangle must have degree at least 3"); break; case cell::type::tetrahedron: if (degree < 4) throw std::runtime_error( "Bubble element on a tetrahedron must have degree at least 4"); break; case cell::type::quadrilateral: if (degree < 2) throw std::runtime_error("Bubble element on a quadrilateral interval " "must have degree at least 2"); break; case cell::type::hexahedron: if (degree < 2) throw std::runtime_error( "Bubble element on a hexahedron must have degree at least 2"); break; default: throw std::runtime_error("Unsupported cell type"); } const std::size_t tdim = cell::topological_dimension(celltype); std::array>, 4> M; std::array>, 4> x; // Evaluate the expansion polynomials at the quadrature points auto [pts, _wts] = quadrature::make_quadrature("default", celltype, 2 * degree); auto wts = xt::adapt(_wts); const xt::xtensor phi = xt::view( polyset::tabulate(celltype, degree, 0, pts), 0, xt::all(), xt::all()); // The number of order (degree) polynomials const std::size_t psize = phi.shape(1); // Create points at nodes on interior const auto points = lattice::create(celltype, degree, lattice::type::equispaced, false); const std::size_t ndofs = points.shape(0); x[tdim].push_back(points); // Create coefficients for order (degree-1) vector polynomials xt::xtensor phi1; xt::xtensor bubble; std::map> entity_transformations; switch (celltype) { case cell::type::interval: { phi1 = xt::view(polyset::tabulate(celltype, degree - 2, 0, pts), 0, xt::all(), xt::all()); auto p = pts; bubble = p * (1.0 - p); break; } case cell::type::triangle: { phi1 = xt::view(polyset::tabulate(celltype, degree - 3, 0, pts), 0, xt::all(), xt::all()); auto p0 = xt::col(pts, 0); auto p1 = xt::col(pts, 1); bubble = p0 * p1 * (1 - p0 - p1); entity_transformations[cell::type::interval] = xt::xtensor({1, 0, 0}); break; } case cell::type::tetrahedron: { phi1 = xt::view(polyset::tabulate(celltype, degree - 4, 0, pts), 0, xt::all(), xt::all()); auto p0 = xt::col(pts, 0); auto p1 = xt::col(pts, 1); auto p2 = xt::col(pts, 2); bubble = p0 * p1 * p2 * (1 - p0 - p1 - p2); entity_transformations[cell::type::interval] = xt::xtensor({1, 0, 0}); entity_transformations[cell::type::triangle] = xt::xtensor({2, 0, 
0}); break; } case cell::type::quadrilateral: { phi1 = xt::view(polyset::tabulate(celltype, degree - 2, 0, pts), 0, xt::all(), xt::all()); auto p0 = xt::col(pts, 0); auto p1 = xt::col(pts, 1); bubble = p0 * (1 - p0) * p1 * (1 - p1); entity_transformations[cell::type::interval] = xt::xtensor({1, 0, 0}); break; } case cell::type::hexahedron: { phi1 = xt::view(polyset::tabulate(celltype, degree - 2, 0, pts), 0, xt::all(), xt::all()); auto p0 = xt::col(pts, 0); auto p1 = xt::col(pts, 1); auto p2 = xt::col(pts, 2); bubble = p0 * (1 - p0) * p1 * (1 - p1) * p2 * (1 - p2); entity_transformations[cell::type::interval] = xt::xtensor({1, 0, 0}); entity_transformations[cell::type::quadrilateral] = xt::xtensor({2, 0, 0}); break; } default: throw std::runtime_error("Unknown cell type."); } xt::xtensor wcoeffs = xt::zeros({ndofs, psize}); for (std::size_t i = 0; i < phi1.shape(1); ++i) { auto integrand = xt::col(phi1, i) * bubble; for (std::size_t k = 0; k < psize; ++k) wcoeffs(i, k) = xt::sum(wts * integrand * xt::col(phi, k))(); } const std::vector>> topology = cell::topology(celltype); M[tdim].push_back(xt::xtensor({ndofs, 1, ndofs})); xt::view(M[tdim][0], xt::all(), 0, xt::all()) = xt::eye(ndofs); xt::xtensor coeffs = compute_expansion_coefficients( celltype, wcoeffs, {M[tdim]}, {x[tdim]}, degree); return FiniteElement(element::family::Bubble, celltype, degree, {1}, coeffs, entity_transformations, x, M, maps::type::identity); } //----------------------------------------------------------------------------- basix-0.3.0/cpp/basix/bubble.h000066400000000000000000000007101411115224000161010ustar00rootroot00000000000000// Copyright (c) 2020 Matthew Scroggs // FEniCS Project // SPDX-License-Identifier: MIT #pragma once #include "cell.h" #include "finite-element.h" namespace basix { /// Create a bubble element on cell with given degree /// @param[in] celltype interval, triangle, tetrahedral, quadrilateral or /// hexahedral celltype /// @param[in] degree /// @return A FiniteElement FiniteElement create_bubble(cell::type celltype, int degree); } // namespace basix basix-0.3.0/cpp/basix/cell.cpp000066400000000000000000000522511411115224000161270ustar00rootroot00000000000000// Copyright (c) 2020 Chris Richardson // FEniCS Project // SPDX-License-Identifier: MIT #include "cell.h" #include #include #include #include #include using namespace basix; namespace { template xt::xtensor cross3(const U& u, const V& v) { xt::xtensor c = xt::zeros({3}); c[0] = u[1] * v[2] - u[2] * v[1]; c[1] = u[2] * v[0] - u[0] * v[2]; c[2] = u[0] * v[1] - u[1] * v[0]; return c; } } // namespace //----------------------------------------------------------------------------- xt::xtensor cell::geometry(cell::type celltype) { switch (celltype) { case cell::type::interval: return xt::xtensor({{0.0}, {1.0}}); case cell::type::triangle: return xt::xtensor({{0.0, 0.0}, {1.0, 0.0}, {0.0, 1.0}}); case cell::type::quadrilateral: return xt::xtensor( {{0.0, 0.0}, {1.0, 0.0}, {0.0, 1.0}, {1.0, 1.0}}); case cell::type::tetrahedron: return xt::xtensor( {{0.0, 0.0, 0.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0}, {0.0, 0.0, 1.0}}); case cell::type::prism: return xt::xtensor({{0.0, 0.0, 0.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 1.0}, {0.0, 1.0, 1.0}}); case cell::type::pyramid: return xt::xtensor({{0.0, 0.0, 0.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0}, {1.0, 1.0, 0.0}, {0.0, 0.0, 1.0}}); case cell::type::hexahedron: return xt::xtensor({{0.0, 0.0, 0.0}, {1.0, 0.0, 0.0}, {0.0, 1.0, 0.0}, {1.0, 1.0, 0.0}, {0.0, 0.0, 1.0}, {1.0, 0.0, 1.0}, {0.0, 
1.0, 1.0}, {1.0, 1.0, 1.0}}); default: throw std::runtime_error("Unsupported cell type"); } } //----------------------------------------------------------------------------- std::vector>> cell::topology(cell::type celltype) { std::vector>> t; switch (celltype) { case cell::type::interval: t.resize(2); // Vertices t[0] = {{0}, {1}}; // Cell t[1] = {{0, 1}}; break; case cell::type::triangle: t.resize(3); // Vertices t[0] = {{0}, {1}, {2}}; // Edges t[1] = {{1, 2}, {0, 2}, {0, 1}}; // Cell t[2] = {{0, 1, 2}}; break; case cell::type::quadrilateral: t.resize(3); // Vertices t[0] = {{0}, {1}, {2}, {3}}; // Edges t[1] = {{0, 1}, {0, 2}, {1, 3}, {2, 3}}; // Cell t[2] = {{0, 1, 2, 3}}; break; case cell::type::tetrahedron: t.resize(4); // Vertices t[0] = {{0}, {1}, {2}, {3}}; // Edges t[1] = {{2, 3}, {1, 3}, {1, 2}, {0, 3}, {0, 2}, {0, 1}}; // Faces t[2] = {{1, 2, 3}, {0, 2, 3}, {0, 1, 3}, {0, 1, 2}}; // Cell t[3] = {{0, 1, 2, 3}}; break; case cell::type::prism: t.resize(4); // Vertices t[0] = {{0}, {1}, {2}, {3}, {4}, {5}}; // Edges t[1] = {{0, 1}, {0, 2}, {0, 3}, {1, 2}, {1, 4}, {2, 5}, {3, 4}, {3, 5}, {4, 5}}; // Faces t[2] = {{0, 1, 2}, {0, 1, 3, 4}, {0, 2, 3, 5}, {1, 2, 4, 5}, {3, 4, 5}}; // Cell t[3] = {{0, 1, 2, 3, 4, 5}}; break; case cell::type::pyramid: t.resize(4); // Vertices t[0] = {{0}, {1}, {2}, {3}, {4}}; // Edges t[1] = {{0, 1}, {0, 2}, {0, 4}, {1, 3}, {1, 4}, {2, 3}, {2, 4}, {3, 4}}; // Faces t[2] = {{0, 1, 2, 3}, {0, 1, 4}, {0, 2, 4}, {1, 3, 4}, {2, 3, 4}}; // Cell t[3] = {{0, 1, 2, 3, 4}}; break; case cell::type::hexahedron: t.resize(4); // Vertices t[0] = {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}}; // Edges t[1] = {{0, 1}, {0, 2}, {0, 4}, {1, 3}, {1, 5}, {2, 3}, {2, 6}, {3, 7}, {4, 5}, {4, 6}, {5, 7}, {6, 7}}; // Faces t[2] = {{0, 1, 2, 3}, {0, 1, 4, 5}, {0, 2, 4, 6}, {1, 3, 5, 7}, {2, 3, 6, 7}, {4, 5, 6, 7}}; // Cell t[3] = {{0, 1, 2, 3, 4, 5, 6, 7}}; break; default: throw std::runtime_error("Unsupported cell type"); } return t; } //----------------------------------------------------------------------------- std::vector>>> cell::sub_entity_connectivity(cell::type celltype) { std::vector>>> t; switch (celltype) { case cell::type::interval: t.resize(2); // Vertices t[0] = {{{0}, {0}}, {{1}, {0}}}; // Edge t[1] = {{{0, 1}, {0}}}; break; case cell::type::triangle: t.resize(3); // Vertices t[0] = {{{0}, {1, 2}, {0}}, {{1}, {0, 2}, {0}}, {{2}, {0, 1}, {0}}}; // Edges t[1] = {{{1, 2}, {0}, {0}}, {{0, 2}, {1}, {0}}, {{0, 1}, {2}, {0}}}; // Face t[2] = {{{0, 1, 2}, {0, 1, 2}, {0}}}; break; case cell::type::quadrilateral: t.resize(3); // Vertices t[0] = {{{0}, {0, 1}, {0}}, {{1}, {0, 2}, {0}}, {{2}, {1, 3}, {0}}, {{3}, {2, 3}, {0}}}; // Edges t[1] = {{{0, 1}, {0}, {0}}, {{0, 2}, {1}, {0}}, {{1, 3}, {2}, {0}}, {{2, 3}, {3}, {0}}}; // Face t[2] = {{{0, 1, 2, 3}, {0, 1, 2, 3}, {0}}}; break; case cell::type::tetrahedron: t.resize(4); // Vertices t[0] = {{{0}, {3, 4, 5}, {1, 2, 3}, {0}}, {{1}, {1, 2, 5}, {0, 2, 3}, {0}}, {{2}, {0, 2, 4}, {0, 1, 3}, {0}}, {{3}, {0, 1, 3}, {0, 1, 2}, {0}}}; // Edges t[1] = { {{2, 3}, {0}, {0, 1}, {0}}, {{1, 3}, {1}, {0, 2}, {0}}, {{1, 2}, {2}, {0, 3}, {0}}, {{0, 3}, {3}, {1, 2}, {0}}, {{0, 2}, {4}, {1, 3}, {0}}, {{0, 1}, {5}, {2, 3}, {0}}, }; // Faces t[2] = {{{1, 2, 3}, {0, 1, 2}, {0}, {0}}, {{0, 2, 3}, {0, 3, 4}, {1}, {0}}, {{0, 1, 3}, {1, 3, 5}, {2}, {0}}, {{0, 1, 2}, {2, 4, 5}, {3}, {0}}}; // Volume t[3] = {{{0, 1, 2, 3}, {0, 1, 2, 3, 4, 5}, {0, 1, 2, 3}, {0}}}; break; case cell::type::hexahedron: t.resize(4); // Vertices t[0] = { {{0}, {0, 1, 2}, {0, 1, 2}, 
{0}}, {{1}, {0, 3, 4}, {0, 1, 3}, {0}}, {{2}, {1, 5, 6}, {0, 2, 4}, {0}}, {{3}, {3, 5, 7}, {0, 3, 4}, {0}}, {{4}, {2, 8, 9}, {1, 2, 5}, {0}}, {{5}, {4, 8, 10}, {1, 3, 5}, {0}}, {{6}, {6, 9, 11}, {2, 4, 5}, {0}}, {{7}, {7, 10, 11}, {3, 4, 5}, {0}}}; // Edges t[1] = {{{0, 1}, {0}, {0, 1}, {0}}, {{0, 2}, {1}, {0, 2}, {0}}, {{0, 4}, {2}, {1, 2}, {0}}, {{1, 3}, {3}, {0, 3}, {0}}, {{1, 5}, {4}, {1, 3}, {0}}, {{2, 3}, {5}, {0, 4}, {0}}, {{2, 6}, {6}, {2, 4}, {0}}, {{3, 7}, {7}, {3, 4}, {0}}, {{4, 5}, {8}, {1, 5}, {0}}, {{4, 6}, {9}, {2, 5}, {0}}, {{5, 7}, {10}, {3, 5}, {0}}, {{6, 7}, {11}, {4, 5}, {0}}}; // Faces t[2] = {{{0, 1, 2, 3}, {0, 1, 3, 5}, {0}, {0}}, {{0, 1, 4, 5}, {0, 2, 4, 8}, {1}, {0}}, {{0, 2, 4, 6}, {1, 2, 6, 9}, {2}, {0}}, {{1, 3, 5, 7}, {3, 4, 7, 10}, {3}, {0}}, {{2, 3, 6, 7}, {5, 6, 7, 11}, {4}, {0}}, {{4, 5, 6, 7}, {8, 9, 10, 11}, {5}, {0}}}; // Volume t[3] = {{{0, 1, 2, 3, 4, 5, 6, 7}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, {0, 1, 2, 3, 4, 5}, {0}}}; break; case cell::type::prism: t.resize(4); // Vertices t[0] = {{{0}, {0, 1, 2}, {0, 1, 2}, {0}}, {{1}, {0, 3, 4}, {0, 1, 3}, {0}}, {{2}, {1, 3, 5}, {0, 2, 3}, {0}}, {{3}, {2, 6, 7}, {1, 2, 4}, {0}}, {{4}, {4, 6, 8}, {1, 3, 4}, {0}}, {{5}, {5, 7, 8}, {2, 3, 4}, {0}}}; // Edges t[1] = {{{0, 1}, {0}, {0, 1}, {0}}, {{0, 2}, {1}, {0, 2}, {0}}, {{0, 3}, {2}, {1, 2}, {0}}, {{1, 2}, {3}, {0, 3}, {0}}, {{1, 4}, {4}, {1, 3}, {0}}, {{2, 5}, {5}, {2, 3}, {0}}, {{3, 4}, {6}, {1, 4}, {0}}, {{3, 5}, {7}, {2, 4}, {0}}, {{4, 5}, {8}, {3, 4}, {0}}}; // Faces t[2] = {{{0, 1, 2}, {0, 1, 3}, {0}, {0}}, {{0, 1, 3, 4}, {0, 2, 4, 6}, {1}, {0}}, {{0, 2, 3, 5}, {1, 2, 5, 7}, {2}, {0}}, {{1, 2, 4, 5}, {3, 4, 5, 8}, {3}, {0}}, {{3, 4, 5}, {6, 7, 8}, {4}, {0}}}; // Volume t[3] = {{{0, 1, 2, 3, 4, 5}, {0, 1, 2, 3, 4, 5, 6, 7, 8}, {0, 1, 2, 3, 4}, {0}}}; break; case cell::type::pyramid: t.resize(4); // Vertices t[0] = {{{0}, {0, 1, 2}, {0, 1, 2}, {0}}, {{1}, {0, 3, 4}, {0, 1, 3}, {0}}, {{2}, {1, 5, 6}, {0, 2, 4}, {0}}, {{3}, {3, 5, 7}, {0, 3, 4}, {0}}, {{4}, {2, 4, 6, 7}, {1, 2, 3, 4}, {0}}}; // Edges t[1] = {{{0, 1}, {0}, {0, 1}, {0}}, {{0, 2}, {1}, {0, 2}, {0}}, {{0, 4}, {2}, {1, 2}, {0}}, {{1, 3}, {3}, {0, 3}, {0}}, {{1, 4}, {4}, {1, 3}, {0}}, {{2, 3}, {5}, {0, 4}, {0}}, {{2, 4}, {6}, {2, 4}, {0}}, {{3, 4}, {7}, {3, 4}, {0}}}; // Faces t[2] = {{{0, 1, 2, 3}, {0, 1, 3, 5}, {0}, {0}}, {{0, 1, 4}, {0, 2, 4}, {1}, {0}}, {{0, 2, 4}, {1, 2, 6}, {2}, {0}}, {{1, 3, 4}, {3, 4, 7}, {3}, {0}}, {{2, 3, 4}, {5, 6, 7}, {4}, {0}}}; // Volume t[3] = {{{0, 1, 2, 3, 4}, {0, 1, 2, 3, 4, 5, 6, 7}, {0, 1, 2, 3, 4}, {0}}}; break; default: throw std::runtime_error("Unsupported cell type"); } return t; } //----------------------------------------------------------------------------- int cell::topological_dimension(cell::type cell_type) { switch (cell_type) { case cell::type::interval: return 1; case cell::type::triangle: return 2; case cell::type::quadrilateral: return 2; case cell::type::tetrahedron: return 3; case cell::type::hexahedron: return 3; case cell::type::prism: return 3; case cell::type::pyramid: return 3; default: throw std::runtime_error("Unsupported cell type"); } return 0; } //----------------------------------------------------------------------------- xt::xtensor cell::sub_entity_geometry(cell::type celltype, int dim, int index) { const std::vector>> cell_topology = cell::topology(celltype); if (dim < 0 or dim >= (int)cell_topology.size()) throw std::runtime_error("Invalid dimension for sub-entity"); const xt::xtensor cell_geometry = cell::geometry(celltype); 
const std::vector>& t = cell_topology[dim]; if (index < 0 or index >= (int)t.size()) throw std::runtime_error("Invalid entity index"); xt::xtensor sub_entity({t[index].size(), cell_geometry.shape(1)}); for (std::size_t i = 0; i < sub_entity.shape(0); ++i) xt::row(sub_entity, i) = xt::row(cell_geometry, t[index][i]); return sub_entity; } //---------------------------------------------------------------------------- int cell::num_sub_entities(cell::type celltype, int dim) { const std::vector>> cell_topology = cell::topology(celltype); return cell_topology.at(dim).size(); } //---------------------------------------------------------------------------- cell::type cell::sub_entity_type(cell::type celltype, int dim, int index) { const int tdim = cell::topological_dimension(celltype); assert(dim >= 0 and dim <= tdim); if (dim == 0) return cell::type::point; else if (dim == 1) return cell::type::interval; else if (dim == tdim) return celltype; const std::vector>> t = cell::topology(celltype); switch (t[dim][index].size()) { case 3: return cell::type::triangle; case 4: return cell::type::quadrilateral; default: throw std::runtime_error("Error in sub_entity_type"); } } //----------------------------------------------------------------------------- cell::type cell::str_to_type(std::string name) { static const std::map name_to_type = {{"point", cell::type::point}, {"interval", cell::type::interval}, {"triangle", cell::type::triangle}, {"tetrahedron", cell::type::tetrahedron}, {"quadrilateral", cell::type::quadrilateral}, {"pyramid", cell::type::pyramid}, {"prism", cell::type::prism}, {"hexahedron", cell::type::hexahedron}}; auto it = name_to_type.find(name); if (it == name_to_type.end()) throw std::runtime_error("Can't find name " + name); return it->second; } //----------------------------------------------------------------------------- const std::string& cell::type_to_str(cell::type type) { static const std::map type_to_name = {{cell::type::point, "point"}, {cell::type::interval, "interval"}, {cell::type::triangle, "triangle"}, {cell::type::tetrahedron, "tetrahedron"}, {cell::type::quadrilateral, "quadrilateral"}, {cell::type::pyramid, "pyramid"}, {cell::type::prism, "prism"}, {cell::type::hexahedron, "hexahedron"}}; auto it = type_to_name.find(type); if (it == type_to_name.end()) throw std::runtime_error("Can't find type"); return it->second; } //----------------------------------------------------------------------------- double cell::volume(cell::type cell_type) { switch (cell_type) { case cell::type::interval: return 1; case cell::type::triangle: return 0.5; case cell::type::quadrilateral: return 1; case cell::type::tetrahedron: return 1.0 / 6; case cell::type::hexahedron: return 1; case cell::type::prism: return 0.5; case cell::type::pyramid: return 1.0 / 3; default: throw std::runtime_error("Unsupported cell type"); } } //----------------------------------------------------------------------------- xt::xtensor cell::facet_outward_normals(cell::type cell_type) { xt::xtensor normals = cell::facet_normals(cell_type); const std::vector facet_orientations = cell::facet_orientations(cell_type); for (std::size_t f = 0; f < normals.shape(0); ++f) { if (facet_orientations[f]) xt::row(normals, f) *= -1.0; } return normals; } //----------------------------------------------------------------------------- xt::xtensor cell::facet_normals(cell::type cell_type) { const int tdim = cell::topological_dimension(cell_type); const xt::xtensor x = cell::geometry(cell_type); const std::vector> facets = 
cell::topology(cell_type)[tdim - 1]; xt::xtensor normals( {facets.size(), static_cast(tdim)}); switch (tdim) { case 1: return xt::ones({facets.size(), static_cast(1)}); case 2: { for (std::size_t f = 0; f < facets.size(); ++f) { const std::vector& facet = facets[f]; auto normal = xt::row(normals, f); assert(facet.size() == 2); normal(0) = x(facet[1], 1) - x(facet[0], 1); normal(1) = x(facet[0], 0) - x(facet[1], 0); normal /= xt::sqrt(xt::sum(normal * normal)); } return normals; } case 3: { for (std::size_t f = 0; f < facets.size(); ++f) { const std::vector& facet = facets[f]; auto normal = xt::row(normals, f); assert(facets[f].size() == 3 or facets[f].size() == 4); auto e0 = xt::row(x, facet[1]) - xt::row(x, facet[0]); auto e1 = xt::row(x, facet[2]) - xt::row(x, facet[0]); normal = cross3(e0, e1); normal /= xt::sqrt(xt::sum(normal * normal)); } return normals; } default: throw std::runtime_error("Wrong topological dimension"); } } //----------------------------------------------------------------------------- std::vector cell::facet_orientations(cell::type cell_type) { const std::size_t tdim = cell::topological_dimension(cell_type); const xt::xtensor x = cell::geometry(cell_type); const std::vector> facets = cell::topology(cell_type)[tdim - 1]; const xt::xtensor normals = cell::facet_normals(cell_type); const xt::xtensor midpoint = xt::mean(x, 0); std::vector orientations(normals.shape(0)); for (std::size_t f = 0; f < normals.shape(0); ++f) { auto normal = xt::row(normals, f); auto x0 = xt::row(x, facets[f][0]) - midpoint; const double dot = xt::sum(x0 * normal)(); orientations[f] = dot < 0; } return orientations; } //----------------------------------------------------------------------------- xt::xtensor cell::facet_reference_volumes(cell::type cell_type) { const int tdim = cell::topological_dimension(cell_type); std::vector facet_types = cell::subentity_types(cell_type)[tdim - 1]; std::array shape = {facet_types.size()}; xt::xtensor out(shape); for (std::size_t i = 0; i < facet_types.size(); ++i) out(i) = cell::volume(facet_types[i]); return out; } //----------------------------------------------------------------------------- std::vector> cell::subentity_types(cell::type cell_type) { switch (cell_type) { case cell::type::interval: return {{cell::type::point, cell::type::point}, {cell::type::interval}}; case cell::type::triangle: return {{cell::type::point, cell::type::point, cell::type::point}, {cell::type::interval, cell::type::interval, cell::type::interval}, {cell::type::triangle}}; case cell::type::quadrilateral: return {{cell::type::point, cell::type::point, cell::type::point, cell::type::point}, {cell::type::interval, cell::type::interval, cell::type::interval, cell::type::interval}, {cell::type::quadrilateral}}; case cell::type::tetrahedron: return {{cell::type::point, cell::type::point, cell::type::point, cell::type::point}, {cell::type::interval, cell::type::interval, cell::type::interval, cell::type::interval, cell::type::interval, cell::type::interval}, {cell::type::triangle, cell::type::triangle, cell::type::triangle, cell::type::triangle}, {cell::type::tetrahedron}}; case cell::type::hexahedron: return {{cell::type::point, cell::type::point, cell::type::point, cell::type::point, cell::type::point, cell::type::point, cell::type::point, cell::type::point}, {cell::type::interval, cell::type::interval, cell::type::interval, cell::type::interval, cell::type::interval, cell::type::interval, cell::type::interval, cell::type::interval, cell::type::interval, cell::type::interval, 
cell::type::interval, cell::type::interval}, {cell::type::quadrilateral, cell::type::quadrilateral, cell::type::quadrilateral, cell::type::quadrilateral, cell::type::quadrilateral, cell::type::quadrilateral}, {cell::type::hexahedron}}; case cell::type::prism: return {{cell::type::point, cell::type::point, cell::type::point, cell::type::point, cell::type::point, cell::type::point}, {cell::type::interval, cell::type::interval, cell::type::interval, cell::type::interval, cell::type::interval, cell::type::interval, cell::type::interval, cell::type::interval, cell::type::interval}, {cell::type::triangle, cell::type::quadrilateral, cell::type::quadrilateral, cell::type::quadrilateral, cell::type::triangle}, {cell::type::prism}}; case cell::type::pyramid: return {{cell::type::point, cell::type::point, cell::type::point, cell::type::point, cell::type::point}, {cell::type::interval, cell::type::interval, cell::type::interval, cell::type::interval, cell::type::interval, cell::type::interval, cell::type::interval, cell::type::interval}, {cell::type::quadrilateral, cell::type::triangle, cell::type::triangle, cell::type::triangle, cell::type::triangle}, {cell::type::pyramid}}; default: throw std::runtime_error("Unsupported cell type"); } } //----------------------------------------------------------------------------- xt::xtensor cell::facet_jacobians(cell::type cell_type) { const std::size_t tdim = cell::topological_dimension(cell_type); if (tdim != 2 and tdim != 3) { throw std::runtime_error( "Facet jacobians not supported for this cell type."); } const xt::xtensor x = cell::geometry(cell_type); const std::vector> facets = cell::topology(cell_type)[tdim - 1]; xt::xtensor jacobians({facets.size(), tdim, tdim - 1}); for (std::size_t f = 0; f < facets.size(); ++f) { const std::vector& facet = facets[f]; auto x0 = xt::row(x, facet[0]); for (std::size_t j = 0; j < tdim - 1; ++j) xt::view(jacobians, f, xt::all(), j) = xt::row(x, facet[1 + j]) - x0; } return jacobians; } //----------------------------------------------------------------------------- basix-0.3.0/cpp/basix/cell.h000066400000000000000000000076651411115224000156050ustar00rootroot00000000000000// Copyright (c) 2020 Chris Richardson // FEniCS Project // SPDX-License-Identifier: MIT #pragma once #include #include /// Information about reference cells including their topological and /// geometric data. namespace basix::cell { /// Cell type enum class type { point = 0, interval = 1, triangle = 2, tetrahedron = 3, quadrilateral = 4, hexahedron = 5, prism = 6, pyramid = 7 }; /// Cell geometry /// @param celltype Cell Type /// @return Set of vertex points of the cell xt::xtensor geometry(cell::type celltype); /// Cell topology /// @param celltype Cell Type /// @return List of topology (vertex indices) for each dimension (0..tdim) std::vector>> topology(cell::type celltype); /// Get the numbers of entities connected to each subentity of the cell. /// /// Returns a vector of the form: output[dim][entity_n][connected_dim] = /// [connected_entity_n0, connected_entity_n1, ...] This indicates that the /// entity of dimension `dim` and number `entity_n` is connected to the entities /// of dimension `connected_dim` and numbers `connected_entity_n0`, /// `connected_entity_n1`, ... 
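///
/// For example (values taken from the reference triangle defined in
/// cell.cpp): for `cell::type::triangle`, `output[1][0][0]` is `{1, 2}`,
/// since edge 0 of the reference triangle connects vertices 1 and 2, and
/// `output[0][0][1]` is `{1, 2}`, since vertex 0 lies on edges 1 and 2.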
/// /// @param celltype Cell Type /// @return List of topology (vertex indices) for each dimension (0..tdim) std::vector>>> sub_entity_connectivity(cell::type celltype); /// Sub-entity of a cell, given by topological dimension and index /// @param celltype The cell::type /// @param dim Dimension of sub-entity /// @param index Local index of sub-entity /// @return Set of vertex points of the sub-entity xt::xtensor sub_entity_geometry(cell::type celltype, int dim, int index); /// @todo Optimise this function /// Number of sub-entities of a cell by topological dimension /// @param celltype The cell::type /// @param dim Dimension of sub-entity /// @return The number of sub-entities of the given dimension /// @warning This function is expensive to call. Do not use in /// performance critical code int num_sub_entities(cell::type celltype, int dim); /// Get the topological dimension for a given cell type /// @param celltype Cell type /// @return the topological dimension int topological_dimension(cell::type celltype); /// Get the cell type of a sub-entity of given dimension and index /// @param celltype Type of cell /// @param dim Topological dimension of sub-entity /// @param index Index of sub-entity /// @return cell type of sub-entity cell::type sub_entity_type(cell::type celltype, int dim, int index); /// Convert a cell type string to enum /// @param name String /// @return cell type cell::type str_to_type(std::string name); /// Convert cell type enum to string const std::string& type_to_str(cell::type type); /// Get the volume of a reference cell /// @param cell_type Type of cell double volume(cell::type cell_type); /// Get the (outward) normals to the facets of a reference cell /// @param cell_type Type of cell xt::xtensor facet_outward_normals(cell::type cell_type); /// Get the normals to the facets of a reference cell oriented using the /// low-to-high ordering of the facet /// @param cell_type Type of cell xt::xtensor facet_normals(cell::type cell_type); /// Get a array of bools indicating whether or not the facet normals are outward /// pointing /// @param cell_type Type of cell std::vector facet_orientations(cell::type cell_type); /// Get the reference volumes of the facets of a reference cell /// @param cell_type Type of cell xt::xtensor facet_reference_volumes(cell::type cell_type); /// Get the reference volumes of the facets of a reference cell /// @param cell_type Type of cell std::vector> subentity_types(cell::type cell_type); /// Get the jacobians of the facets of a reference cell /// @param cell_type Type of cell xt::xtensor facet_jacobians(cell::type cell_type); } // namespace basix::cell basix-0.3.0/cpp/basix/crouzeix-raviart.cpp000066400000000000000000000047641411115224000205340ustar00rootroot00000000000000// Copyright (c) 2020 Chris Richardson // FEniCS Project // SPDX-License-Identifier: MIT #include "crouzeix-raviart.h" #include "cell.h" #include "element-families.h" #include "maps.h" #include #include #include #include #include using namespace basix; //----------------------------------------------------------------------------- FiniteElement basix::create_cr(cell::type celltype, int degree) { if (degree != 1) throw std::runtime_error("Degree must be 1 for Crouzeix-Raviart"); const std::size_t tdim = cell::topological_dimension(celltype); if (tdim < 2) { throw std::runtime_error( "topological dim must be 2 or 3 for Crouzeix-Raviart"); } if (celltype != cell::type::triangle and celltype != cell::type::tetrahedron) { throw std::runtime_error( "Crouzeix-Raviart is only 
defined on triangles and tetrahedra."); } const std::vector>> topology = cell::topology(celltype); const std::vector>& facet_topology = topology[tdim - 1]; const std::size_t ndofs = facet_topology.size(); const xt::xtensor geometry = cell::geometry(celltype); std::array>, 4> M; std::array>, 4> x; x[tdim - 1].resize(facet_topology.size(), xt::zeros({static_cast(1), tdim})); // Compute facet midpoints for (std::size_t f = 0; f < facet_topology.size(); ++f) { auto v = xt::view(geometry, xt::keep(facet_topology[f]), xt::all()); xt::row(x[tdim - 1][f], 0) = xt::mean(v, 0); } std::map> entity_transformations; if (celltype == cell::type::triangle) { entity_transformations[cell::type::interval] = {{{1.}}}; } else if (celltype == cell::type::tetrahedron) { entity_transformations[cell::type::interval] = xt::xtensor({1, 0, 0}); entity_transformations[cell::type::triangle] = {{{1}}, {{1}}}; } M[tdim - 1].resize(facet_topology.size(), xt::ones({1, 1, 1})); const xt::xtensor coeffs = compute_expansion_coefficients( celltype, xt::eye(ndofs), {M[tdim - 1]}, {x[tdim - 1]}, degree); return FiniteElement(element::family::CR, celltype, 1, {1}, coeffs, entity_transformations, x, M, maps::type::identity); } //----------------------------------------------------------------------------- basix-0.3.0/cpp/basix/crouzeix-raviart.h000066400000000000000000000005361411115224000201720ustar00rootroot00000000000000// Copyright (c) 2020 Chris Richardson // FEniCS Project // SPDX-License-Identifier: MIT #pragma once #include "finite-element.h" namespace basix { /// Crouzeix-Raviart element /// @note degree must be 1 for Crouzeix-Raviart /// @param celltype /// @param degree FiniteElement create_cr(cell::type celltype, int degree); } // namespace basix basix-0.3.0/cpp/basix/dof-transformations.cpp000066400000000000000000000040241411115224000212020ustar00rootroot00000000000000// Copyright (c) 2020 Chris Richardson & Matthew Scroggs // FEniCS Project // SPDX-License-Identifier: MIT #include "dof-transformations.h" using namespace basix; //----------------------------------------------------------------------------- std::vector doftransforms::interval_reflection(int degree) { std::vector perm(degree); for (int i = 0; i < degree; ++i) perm[i] = degree - 1 - i; return perm; } //----------------------------------------------------------------------------- std::vector doftransforms::triangle_reflection(int degree) { const int n = degree * (degree + 1) / 2; std::vector perm(n); int p = 0; for (int st = 0; st < degree; ++st) { int dof = st; for (int add = degree; add > st; --add) { perm[p++] = dof; dof += add; } } return perm; } //----------------------------------------------------------------------------- std::vector doftransforms::triangle_rotation(int degree) { const int n = degree * (degree + 1) / 2; std::vector perm(n); int p = 0; int st = n - 1; for (int i = 1; i <= degree; ++i) { int dof = st; for (int sub = i; sub <= degree; ++sub) { perm[p++] = dof; dof -= sub + 1; } st -= i; } return perm; } //----------------------------------------------------------------------------- std::vector doftransforms::quadrilateral_reflection(int degree) { const int n = degree * degree; std::vector perm(n); int p = 0; for (int st = 0; st < degree; ++st) for (int i = 0; i < degree; ++i) perm[p++] = st + i * degree; return perm; } //----------------------------------------------------------------------------- std::vector doftransforms::quadrilateral_rotation(int degree) { const int n = degree * degree; std::vector perm(n); int p = 0; for (int st 
= degree - 1; st >= 0; --st) for (int i = 0; i < degree; ++i) perm[st + degree * i] = p++; return perm; } //----------------------------------------------------------------------------- basix-0.3.0/cpp/basix/dof-transformations.h000066400000000000000000000030541411115224000206510ustar00rootroot00000000000000// Copyright (c) 2020 Chris Richardson & Matthew Scroggs // FEniCS Project // SPDX-License-Identifier: MIT #pragma once #include /// Functions to help with the creation of DOF transformation and /// direction correction. namespace basix::doftransforms { /// Reflect the DOFs on an interval /// @param degree The number of DOFs on the interval /// @return A reordering of the numbers 0 to degree-1 representing the /// transformation std::vector interval_reflection(int degree); /// Reflect the DOFs on a triangle /// @param degree The number of DOFs along one side of the triangle /// @return A reordering of the numbers 0 to (degree)*(degree+1)/2-1 /// representing the transformation std::vector triangle_reflection(int degree); /// Rotate the DOFs on a triangle /// @param degree The number of DOFs along one side of the triangle /// @return A reordering of the numbers 0 to (degree)*(degree+1)/2-1 /// representing the transformation std::vector triangle_rotation(int degree); /// Reflect the DOFs on a quadrilateral /// @param degree The number of DOFs along one side of the quadrilateral /// @return A reordering of the numbers 0 to degree*degree-1 representing the /// transformation std::vector quadrilateral_reflection(int degree); /// Rotate the DOFs on a quadrilateral /// @param degree The number of DOFs along one side of the quadrilateral /// @return A reordering of the numbers 0 to degree*degree-1 representing the /// transformation std::vector quadrilateral_rotation(int degree); } // namespace basix::doftransforms basix-0.3.0/cpp/basix/element-families.cpp000066400000000000000000000037351411115224000204330ustar00rootroot00000000000000// Copyright (c) 2020 Chris Richardson & Matthew Scroggs // FEniCS Project // SPDX-License-Identifier: MIT #include "element-families.h" #include #include using namespace basix; //----------------------------------------------------------------------------- element::family element::str_to_type(std::string name) { static const std::map name_to_type = {{"Custom element", element::family::custom}, {"P", element::family::P}, {"DP", element::family::DP}, {"DPC", element::family::DPC}, {"BDM", element::family::BDM}, {"RT", element::family::RT}, {"N1E", element::family::N1E}, {"N2E", element::family::N2E}, {"Regge", element::family::Regge}, {"CR", element::family::CR}, {"Bubble", element::family::Bubble}, {"Serendipity", element::family::Serendipity}}; auto it = name_to_type.find(name); if (it == name_to_type.end()) throw std::runtime_error("Can't find name " + name); return it->second; } //----------------------------------------------------------------------------- const std::string& element::type_to_str(element::family type) { static const std::map name_to_type = {{element::family::custom, "Custom element"}, {element::family::P, "P"}, {element::family::DP, "DP"}, {element::family::DPC, "DPC"}, {element::family::BDM, "BDM"}, {element::family::RT, "RT"}, {element::family::N1E, "N1E"}, {element::family::N2E, "N2E"}, {element::family::Regge, "Regge"}, {element::family::CR, "CR"}, {element::family::Bubble, "Bubble"}, {element::family::Serendipity, "Serendipity"}}; auto it = name_to_type.find(type); if (it == name_to_type.end()) throw std::runtime_error("Can't find type"); 
return it->second; } //----------------------------------------------------------------------------- basix-0.3.0/cpp/basix/element-families.h000066400000000000000000000011011411115224000200610ustar00rootroot00000000000000// Copyright (c) 2020 Matthew Scroggs // FEniCS Project // SPDX-License-Identifier: MIT #pragma once #include namespace basix { namespace element { /// Enum of available element families enum class family { custom = 0, P = 1, RT = 2, N1E = 3, BDM = 4, N2E = 5, CR = 6, Regge = 7, DP = 8, DPC = 9, Bubble = 10, Serendipity = 11 }; /// Convert string to a family element::family str_to_type(std::string name); // Convert family to string const std::string& type_to_str(element::family type); } // namespace element } // namespace basix basix-0.3.0/cpp/basix/finite-element.cpp000066400000000000000000000654631411115224000201260ustar00rootroot00000000000000// Copyright (c) 2020 Chris Richardson & Matthew Scroggs // FEniCS Project // SPDX-License-Identifier: MIT #include "finite-element.h" #include "brezzi-douglas-marini.h" #include "bubble.h" #include "crouzeix-raviart.h" #include "lagrange.h" #include "nce-rtc.h" #include "nedelec.h" #include "polyset.h" #include "raviart-thomas.h" #include "regge.h" #include "serendipity.h" #include "version.h" #include #include #include #include #include #include #define str_macro(X) #X #define str(X) str_macro(X) using namespace basix; namespace { constexpr int compute_value_size(maps::type map_type, int dim) { switch (map_type) { case maps::type::identity: return 1; case maps::type::covariantPiola: return dim; case maps::type::contravariantPiola: return dim; case maps::type::doubleCovariantPiola: return dim * dim; case maps::type::doubleContravariantPiola: return dim * dim; default: throw std::runtime_error("Mapping not yet implemented"); } } //----------------------------------------------------------------------------- constexpr int num_transformations(cell::type cell_type) { switch (cell_type) { case cell::type::point: return 0; case cell::type::interval: return 0; case cell::type::triangle: return 3; case cell::type::quadrilateral: return 4; case cell::type::tetrahedron: return 14; case cell::type::hexahedron: return 24; case cell::type::prism: return 19; case cell::type::pyramid: return 18; default: throw std::runtime_error("Cell type not yet supported"); } } } // namespace //----------------------------------------------------------------------------- basix::FiniteElement basix::create_element(element::family family, cell::type cell, int degree) { switch (family) { case element::family::P: throw std::runtime_error( "Lagrange elements need to be given a lattice type."); case element::family::DP: return create_dlagrange(cell, degree); case element::family::BDM: switch (cell) { case cell::type::quadrilateral: return create_serendipity_div(cell, degree); case cell::type::hexahedron: return create_serendipity_div(cell, degree); default: return create_bdm(cell, degree); } case element::family::RT: { switch (cell) { case cell::type::quadrilateral: return create_rtc(cell, degree); case cell::type::hexahedron: return create_rtc(cell, degree); default: return create_rt(cell, degree); } } case element::family::N1E: { switch (cell) { case cell::type::quadrilateral: return create_nce(cell, degree); case cell::type::hexahedron: return create_nce(cell, degree); default: return create_nedelec(cell, degree); } } case element::family::N2E: switch (cell) { case cell::type::quadrilateral: return create_serendipity_curl(cell, degree); case 
cell::type::hexahedron: return create_serendipity_curl(cell, degree); default: return create_nedelec2(cell, degree); } case element::family::Regge: return create_regge(cell, degree); case element::family::CR: return create_cr(cell, degree); case element::family::Bubble: return create_bubble(cell, degree); case element::family::Serendipity: return create_serendipity(cell, degree); case element::family::DPC: return create_dpc(cell, degree); default: throw std::runtime_error("Element family not found"); } } //----------------------------------------------------------------------------- basix::FiniteElement basix::create_element(element::family family, cell::type cell, int degree, lattice::type lattice_type) { switch (family) { case element::family::P: return create_lagrange(cell, degree, lattice_type); default: throw std::runtime_error("Cannot pass a lattice type to this element."); } } //----------------------------------------------------------------------------- xt::xtensor basix::compute_expansion_coefficients( cell::type celltype, const xt::xtensor& B, const std::vector>>& M, const std::vector>>& x, int degree, double kappa_tol) { std::size_t num_dofs(0), vs(0); for (auto& Md : M) { for (auto& Me : Md) { num_dofs += Me.shape(0); if (vs == 0) vs = Me.shape(1); else if (vs != Me.shape(1)) throw std::runtime_error("Inconsistent value size"); } } std::size_t pdim = polyset::dim(celltype, degree); xt::xtensor D = xt::zeros({num_dofs, vs, pdim}); // Loop over different dimensions std::size_t dof_index = 0; for (std::size_t d = 0; d < M.size(); ++d) { // Loop over entities of dimension d for (std::size_t e = 0; e < x[d].size(); ++e) { // Evaluate polynomial basis at x[d] const xt::xtensor& x_e = x[d][e]; xt::xtensor P; if (x_e.shape(1) == 1 and x_e.size() != 0) { auto pts = xt::view(x_e, xt::all(), 0); P = xt::view(polyset::tabulate(celltype, degree, 0, pts), 0, xt::all(), xt::all()); } else if (x_e.size() != 0) { P = xt::view(polyset::tabulate(celltype, degree, 0, x_e), 0, xt::all(), xt::all()); } // Me: [dof, vs, point] const xt::xtensor& Me = M[d][e]; // Compute dual matrix contribution for (std::size_t i = 0; i < Me.shape(0); ++i) // Dof index for (std::size_t j = 0; j < Me.shape(1); ++j) // Value index for (std::size_t k = 0; k < Me.shape(2); ++k) // Point for (std::size_t l = 0; l < P.shape(1); ++l) // Polynomial term D(dof_index + i, j, l) += Me(i, j, k) * P(k, l); // Dtmp += xt::linalg::dot(Me, P); dof_index += M[d][e].shape(0); } } // Compute B D^{T} // xt::xtensor A = xt::zeros({num_dofs, num_dofs}); // for (std::size_t i = 0; i < A.shape(0); ++i) // for (std::size_t j = 0; j < A.shape(1); ++j) // for (std::size_t k = 0; k < vs; ++k) // for (std::size_t l = 0; l < B[k].shape(1); ++l) // A(i, j) += B[k](i, l) * D(j, k, l); /// Flatten D and take transpose auto Dt_flat = xt::transpose( xt::reshape_view(D, {D.shape(0), D.shape(1) * D.shape(2)})); xt::xtensor BDt = xt::linalg::dot(B, Dt_flat); if (kappa_tol >= 1.0) { if (xt::linalg::cond(BDt, 2) > kappa_tol) { throw std::runtime_error("Condition number of B.D^T when computing " "expansion coefficients exceeds tolerance."); } } // Note: forcing the layout type to get around an xtensor bug with Intel // Compilers // https://github.com/xtensor-stack/xtensor/issues/2351 xt::xtensor B_cmajor( {B.shape(0), B.shape(1)}); B_cmajor.assign(B); // Compute C = (BD^T)^{-1} B auto result = xt::linalg::solve(BDt, B_cmajor); xt::xtensor C({result.shape(0), result.shape(1)}); C.assign(result); return xt::reshape_view(C, {num_dofs, vs, pdim}); } 
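//-----------------------------------------------------------------------------
// Illustrative sketch only (not called anywhere in the library): how the
// inputs of compute_expansion_coefficients fit together for the degree-1
// Lagrange triangle example worked through in finite-element.h. The tensor
// ranks follow the implementation above (B: (ndofs, pdim), M[d][e]:
// (ndofs_e, value_size, npoints), x[d][e]: (npoints, tdim)); the helper name
// below is hypothetical.
namespace
{
[[maybe_unused]] xt::xtensor<double, 3> demo_p1_triangle_coefficients()
{
  constexpr int degree = 1;
  const cell::type celltype = cell::type::triangle;
  const xt::xtensor<double, 2> geom = cell::geometry(celltype);
  const std::size_t ndofs = geom.shape(0); // three vertices, one dof each

  // P1 spans all of P_1 on the triangle, so B is the identity
  xt::xtensor<double, 2> B = xt::eye<double>(ndofs);

  // One point-evaluation functional per vertex: the interpolation point is
  // the vertex itself and the interpolation "matrix" is a single weight of 1
  std::vector<std::vector<xt::xtensor<double, 2>>> x(4);
  std::vector<std::vector<xt::xtensor<double, 3>>> M(4);
  for (std::size_t v = 0; v < ndofs; ++v)
  {
    x[0].push_back(xt::view(geom, xt::range(v, v + 1), xt::all()));
    M[0].push_back(xt::ones<double>({1, 1, 1}));
  }

  // C = (B D^T)^{-1} B, returned with shape (ndofs, value_size, pdim)
  return compute_expansion_coefficients(celltype, B, M, x, degree);
}
} // namespace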
//----------------------------------------------------------------------------- FiniteElement::FiniteElement( element::family family, cell::type cell_type, int degree, const std::vector& value_shape, const xt::xtensor& coeffs, const std::map>& entity_transformations, const std::array>, 4>& x, const std::array>, 4>& M, maps::type map_type) : map_type(map_type), _cell_type(cell_type), _cell_tdim(cell::topological_dimension(cell_type)), _cell_subentity_types(cell::subentity_types(cell_type)), _family(family), _degree(degree), _map_type(map_type), _coeffs(xt::reshape_view( coeffs, {coeffs.shape(0), coeffs.shape(1) * coeffs.shape(2)})), _entity_transformations(entity_transformations), _x(x), _matM_new(M) { // if (points.dimension() == 1) // throw std::runtime_error("Problem with points"); _value_shape = std::vector(value_shape.begin(), value_shape.end()); std::size_t num_points = 0; for (auto& x_dim : x) for (auto& x_e : x_dim) num_points += x_e.shape(0); std::size_t counter = 0; _points.resize({num_points, _cell_tdim}); for (auto& x_dim : x) for (auto& x_e : x_dim) for (std::size_t p = 0; p < x_e.shape(0); ++p) xt::row(_points, counter++) = xt::row(x_e, p); // Copy into _matM const std::size_t value_size = std::accumulate(value_shape.begin(), value_shape.end(), 1, std::multiplies()); // Count number of dofs and point std::size_t num_dofs(0), num_points1(0); for (std::size_t d = 0; d < M.size(); ++d) { for (std::size_t e = 0; e < M[d].size(); ++e) { num_dofs += M[d][e].shape(0); num_points1 += M[d][e].shape(2); } } // Copy data into old _matM matrix _matM = xt::zeros({num_dofs, value_size * num_points1}); auto Mview = xt::reshape_view(_matM, {num_dofs, value_size, num_points1}); // Loop over each topological dimensions std::size_t dof_offset(0), point_offset(0); for (std::size_t d = 0; d < M.size(); ++d) { // Loop of entities of dimension d for (std::size_t e = 0; e < M[d].size(); ++e) { auto dof_range = xt::range(dof_offset, dof_offset + M[d][e].shape(0)); auto point_range = xt::range(point_offset, point_offset + M[d][e].shape(2)); xt::view(Mview, dof_range, xt::all(), point_range) = M[d][e]; point_offset += M[d][e].shape(2); dof_offset += M[d][e].shape(0); } } // Compute number of dofs for each cell entity (computed from // interpolation data) const std::vector>> topology = cell::topology(cell_type); const std::vector>>> connectivity = cell::sub_entity_connectivity(cell_type); _num_edofs.resize(_cell_tdim + 1); _edofs.resize(_cell_tdim + 1); int dof = 0; for (std::size_t d = 0; d < _num_edofs.size(); ++d) { _num_edofs[d].resize(cell::num_sub_entities(_cell_type, d), 0); _edofs[d].resize(cell::num_sub_entities(_cell_type, d)); for (std::size_t e = 0; e < M[d].size(); ++e) { _num_edofs[d][e] = M[d][e].shape(0); for (int i = 0; i < _num_edofs[d][e]; ++i) _edofs[d][e].insert(dof++); } } _num_e_closure_dofs.resize(_cell_tdim + 1); _e_closure_dofs.resize(_cell_tdim + 1); for (std::size_t d = 0; d < _num_edofs.size(); ++d) { _num_e_closure_dofs[d].resize(cell::num_sub_entities(_cell_type, d)); _e_closure_dofs[d].resize(cell::num_sub_entities(_cell_type, d)); for (std::size_t e = 0; e < _e_closure_dofs[d].size(); ++e) { for (std::size_t dim = 0; dim <= d; ++dim) { for (int c : connectivity[d][e][dim]) { _num_e_closure_dofs[d][e] += _edofs[dim][c].size(); for (int dof : _edofs[dim][c]) _e_closure_dofs[d][e].insert(dof); } } } } // Check that nunber of dofs os equal to number of coefficients if (num_dofs != _coeffs.shape(0)) { throw std::runtime_error( "Number of entity dofs does not match total number 
of dofs"); } // Check if base transformations are all permutations _dof_transformations_are_permutations = true; _dof_transformations_are_identity = true; for (const auto& et : _entity_transformations) { for (std::size_t i = 0; _dof_transformations_are_permutations and i < et.second.shape(0); ++i) { for (std::size_t row = 0; row < et.second.shape(1); ++row) { double rmin = xt::amin(xt::view(et.second, i, row, xt::all()))(0); double rmax = xt::amax(xt::view(et.second, i, row, xt::all()))(0); double rtot = xt::sum(xt::view(et.second, i, row, xt::all()))(0); if ((et.second.shape(2) != 1 and !xt::allclose(rmin, 0)) or !xt::allclose(rmax, 1) or !xt::allclose(rtot, 1)) { _dof_transformations_are_permutations = false; _dof_transformations_are_identity = false; break; } if (!xt::allclose(et.second(i, row, row), 1)) _dof_transformations_are_identity = false; } } if (!_dof_transformations_are_permutations) break; } if (!_dof_transformations_are_identity) { // If transformations are permutations, then create the permutations if (_dof_transformations_are_permutations) { for (const auto& et : _entity_transformations) { _eperm[et.first] = std::vector>(et.second.shape(0)); _eperm_rev[et.first] = std::vector>(et.second.shape(0)); for (std::size_t i = 0; i < et.second.shape(0); ++i) { std::vector perm(et.second.shape(1)); std::vector rev_perm(et.second.shape(1)); for (std::size_t row = 0; row < et.second.shape(1); ++row) { for (std::size_t col = 0; col < et.second.shape(1); ++col) { if (et.second(i, row, col) > 0.5) { perm[row] = col; rev_perm[col] = row; break; } } } // Factorise the permutations _eperm[et.first][i] = precompute::prepare_permutation(perm); _eperm_rev[et.first][i] = precompute::prepare_permutation(rev_perm); } } } // Precompute the DOF transformations for (const auto& et : _entity_transformations) { _etrans[et.first] = std::vector< std::tuple, std::vector, xt::xtensor>>(et.second.shape(0)); _etransT[et.first] = std::vector< std::tuple, std::vector, xt::xtensor>>(et.second.shape(0)); _etrans_invT[et.first] = std::vector< std::tuple, std::vector, xt::xtensor>>(et.second.shape(0)); _etrans_inv[et.first] = std::vector< std::tuple, std::vector, xt::xtensor>>(et.second.shape(0)); for (std::size_t i = 0; i < et.second.shape(0); ++i) { if (et.second.shape(1) > 0) { const xt::xtensor& M = xt::view(et.second, i, xt::all(), xt::all()); _etrans[et.first][i] = precompute::prepare_matrix(M); auto M_t = xt::transpose(M); _etransT[et.first][i] = precompute::prepare_matrix(M_t); xt::xtensor Minv; // Rotation of a face: this is in the only base transformation such // that M^{-1} != M. // For a quadrilateral face, M^4 = Id, so M^{-1} = M^3. // For a triangular face, M^3 = Id, so M^{-1} = M^2. // This assumes that all faces of the cell are the same shape. 
For // prisms and pyramids, this will need updating to look at the face // type if (et.first == cell::type::quadrilateral and i == 0) Minv = xt::linalg::dot(xt::linalg::dot(M, M), M); else if (et.first == cell::type::triangle and i == 0) Minv = xt::linalg::dot(M, M); else Minv = M; _etrans_inv[et.first][i] = precompute::prepare_matrix(Minv); auto MinvT = xt::transpose(Minv); _etrans_invT[et.first][i] = precompute::prepare_matrix(MinvT); } } } } } //----------------------------------------------------------------------------- cell::type FiniteElement::cell_type() const { return _cell_type; } //----------------------------------------------------------------------------- int FiniteElement::degree() const { return _degree; } //----------------------------------------------------------------------------- int FiniteElement::value_size() const { return std::accumulate(_value_shape.begin(), _value_shape.end(), 1, std::multiplies()); } //----------------------------------------------------------------------------- const std::vector& FiniteElement::value_shape() const { return _value_shape; } //----------------------------------------------------------------------------- int FiniteElement::dim() const { return _coeffs.shape(0); } //----------------------------------------------------------------------------- element::family FiniteElement::family() const { return _family; } //----------------------------------------------------------------------------- maps::type FiniteElement::mapping_type() const { return _map_type; } //----------------------------------------------------------------------------- bool FiniteElement::dof_transformations_are_permutations() const { return _dof_transformations_are_permutations; } //----------------------------------------------------------------------------- bool FiniteElement::dof_transformations_are_identity() const { return _dof_transformations_are_identity; } //----------------------------------------------------------------------------- const xt::xtensor& FiniteElement::interpolation_matrix() const { return _matM; } //----------------------------------------------------------------------------- const std::vector>& FiniteElement::num_entity_dofs() const { return _num_edofs; } //----------------------------------------------------------------------------- const std::vector>>& FiniteElement::entity_dofs() const { return _edofs; } //----------------------------------------------------------------------------- const std::vector>& FiniteElement::num_entity_closure_dofs() const { return _num_e_closure_dofs; } //----------------------------------------------------------------------------- const std::vector>>& FiniteElement::entity_closure_dofs() const { return _e_closure_dofs; } //----------------------------------------------------------------------------- xt::xtensor FiniteElement::tabulate(int nd, const xt::xarray& x) const { std::size_t ndsize = 1; for (int i = 1; i <= nd; ++i) ndsize *= (_cell_tdim + i); for (int i = 1; i <= nd; ++i) ndsize /= i; const std::size_t vs = value_size(); const std::size_t ndofs = _coeffs.shape(0); xt::xarray _x = x; if (_x.dimension() == 1) _x.reshape({_x.shape(0), 1}); xt::xtensor data({ndsize, x.shape(0), ndofs, vs}); tabulate(nd, _x, data); return data; } //----------------------------------------------------------------------------- void FiniteElement::tabulate(int nd, const xt::xarray& x, xt::xtensor& basis_data) const { xt::xarray _x = x; if (_x.dimension() == 2 and x.shape(1) == 1) _x.reshape({x.shape(0)}); if (_x.shape(1) != 
_cell_tdim) throw std::runtime_error("Point dim does not match element dim."); xt::xtensor basis = polyset::tabulate(_cell_type, _degree, nd, _x); const int psize = polyset::dim(_cell_type, _degree); const int vs = value_size(); xt::xtensor B, C; for (std::size_t p = 0; p < basis.shape(0); ++p) { for (int j = 0; j < vs; ++j) { auto basis_view = xt::view(basis_data, p, xt::all(), xt::all(), j); B = xt::view(basis, p, xt::all(), xt::all()); C = xt::view(_coeffs, xt::all(), xt::range(psize * j, psize * j + psize)); auto result = xt::linalg::dot(B, xt::transpose(C)); basis_view.assign(result); } } } //----------------------------------------------------------------------------- xt::xtensor FiniteElement::base_transformations() const { const std::size_t nt = num_transformations(cell_type()); const std::size_t ndofs = this->dim(); xt::xtensor bt({nt, ndofs, ndofs}); for (std::size_t i = 0; i < nt; ++i) xt::view(bt, i, xt::all(), xt::all()) = xt::eye(ndofs); std::size_t dof_start = 0; int transform_n = 0; if (_cell_tdim > 0) dof_start = std::accumulate(_num_edofs[0].cbegin(), _num_edofs[0].cend(), 0); if (_cell_tdim > 1) { // Base transformations for edges for (int ndofs : _num_edofs[1]) { xt::view(bt, transform_n++, xt::range(dof_start, dof_start + ndofs), xt::range(dof_start, dof_start + ndofs)) = xt::view(_entity_transformations.at(cell::type::interval), 0, xt::all(), xt::all()); dof_start += ndofs; } if (_cell_tdim > 2) { for (std::size_t f = 0; f < _num_edofs[2].size(); ++f) { const int ndofs = _num_edofs[2][f]; if (ndofs > 0) { // TODO: This assumes that every face has the same shape // _entity_transformations should be replaced with a map from a // subentity type to a matrix to allow for prisms and pyramids. xt::view(bt, transform_n++, xt::range(dof_start, dof_start + ndofs), xt::range(dof_start, dof_start + ndofs)) = xt::view( _entity_transformations.at(_cell_subentity_types[2][f]), 0, xt::all(), xt::all()); xt::view(bt, transform_n++, xt::range(dof_start, dof_start + ndofs), xt::range(dof_start, dof_start + ndofs)) = xt::view( _entity_transformations.at(_cell_subentity_types[2][f]), 1, xt::all(), xt::all()); dof_start += ndofs; } } } } return bt; } //----------------------------------------------------------------------------- int FiniteElement::num_points() const { return _points.shape(0); } //----------------------------------------------------------------------------- const xt::xtensor& FiniteElement::points() const { return _points; } //----------------------------------------------------------------------------- xt::xtensor FiniteElement::map_push_forward( const xt::xtensor& U, const xt::xtensor& J, const xtl::span& detJ, const xt::xtensor& K) const { const std::size_t physical_value_size = compute_value_size(_map_type, J.shape(1)); xt::xtensor u({U.shape(0), U.shape(1), physical_value_size}); map_push_forward_m(U, J, detJ, K, u); return u; } //----------------------------------------------------------------------------- xt::xtensor FiniteElement::map_pull_back( const xt::xtensor& u, const xt::xtensor& J, const xtl::span& detJ, const xt::xtensor& K) const { const std::size_t reference_value_size = value_size(); xt::xtensor U({u.shape(0), u.shape(1), reference_value_size}); map_pull_back_m(u, J, detJ, K, U); return U; } //----------------------------------------------------------------------------- void FiniteElement::permute_dofs(const xtl::span& dofs, std::uint32_t cell_info) const { if (!_dof_transformations_are_permutations) { throw std::runtime_error( "The DOF transformations 
for this element are not permutations"); } if (_dof_transformations_are_identity) return; if (_cell_tdim >= 2) { // This assumes 3 bits are used per face. This will need updating if 3D // cells with faces with more than 4 sides are implemented int face_start = _cell_tdim == 3 ? 3 * _num_edofs[2].size() : 0; int dofstart = std::accumulate(_num_edofs[0].cbegin(), _num_edofs[0].cend(), 0); // Permute DOFs on edges for (std::size_t e = 0; e < _num_edofs[1].size(); ++e) { // Reverse an edge if (cell_info >> (face_start + e) & 1) precompute::apply_permutation(_eperm.at(cell::type::interval)[0], dofs, dofstart); dofstart += _num_edofs[1][e]; } if (_cell_tdim == 3) { // Permute DOFs on faces for (std::size_t f = 0; f < _num_edofs[2].size(); ++f) { // Reflect a face if (cell_info >> (3 * f) & 1) precompute::apply_permutation( _eperm.at(_cell_subentity_types[2][f])[1], dofs, dofstart); // Rotate a face for (std::uint32_t r = 0; r < (cell_info >> (3 * f + 1) & 3); ++r) precompute::apply_permutation( _eperm.at(_cell_subentity_types[2][f])[0], dofs, dofstart); dofstart += _num_edofs[2][f]; } } } } //----------------------------------------------------------------------------- void FiniteElement::unpermute_dofs(const xtl::span& dofs, std::uint32_t cell_info) const { if (!_dof_transformations_are_permutations) { throw std::runtime_error( "The DOF transformations for this element are not permutations"); } if (_dof_transformations_are_identity) return; if (_cell_tdim >= 2) { // This assumes 3 bits are used per face. This will need updating if 3D // cells with faces with more than 4 sides are implemented int face_start = _cell_tdim == 3 ? 3 * _num_edofs[2].size() : 0; int dofstart = std::accumulate(_num_edofs[0].cbegin(), _num_edofs[0].cend(), 0); // Permute DOFs on edges for (std::size_t e = 0; e < _num_edofs[1].size(); ++e) { // Reverse an edge if (cell_info >> (face_start + e) & 1) precompute::apply_permutation(_eperm_rev.at(cell::type::interval)[0], dofs, dofstart); dofstart += _num_edofs[1][e]; } if (_cell_tdim == 3) { // Permute DOFs on faces for (std::size_t f = 0; f < _num_edofs[2].size(); ++f) { // Rotate a face for (std::uint32_t r = 0; r < (cell_info >> (3 * f + 1) & 3); ++r) precompute::apply_permutation( _eperm_rev.at(_cell_subentity_types[2][f])[0], dofs, dofstart); // Reflect a face if (cell_info >> (3 * f) & 1) precompute::apply_permutation( _eperm_rev.at(_cell_subentity_types[2][f])[1], dofs, dofstart); dofstart += _num_edofs[2][f]; } } } } //----------------------------------------------------------------------------- std::map> FiniteElement::entity_transformations() const { return _entity_transformations; } //----------------------------------------------------------------------------- std::string basix::version() { static const std::string version_str = str(BASIX_VERSION); return version_str; } //----------------------------------------------------------------------------- basix-0.3.0/cpp/basix/finite-element.h000066400000000000000000001300431411115224000175560ustar00rootroot00000000000000// Copyright (c) 2020 Chris Richardson // FEniCS Project // SPDX-License-Identifier: MIT // FIXME: just include everything for now // Need to define public API #pragma once #include "cell.h" #include "element-families.h" #include "lattice.h" #include "maps.h" #include "precompute.h" #include #include #include #include #include #include #include #include #include #include /// Placeholder namespace basix { /// Calculates the basis functions of the finite element, in terms of the /// polynomial basis. 
/// /// The below explanation uses Einstein notation. /// /// The basis functions @f${\phi_i}@f$ of a finite element are represented /// as a linear combination of polynomials @f$\{p_j\}@f$ in an underlying /// polynomial basis that span the space of all d-dimensional polynomials up /// to order @f$k \ (P_k^d)@f$: /// \f[ \phi_i = c_{ij} p_j \f] /// /// In some cases, the basis functions @f$\{\phi_i\}@f$ do not span the /// full space @f$P_k@f$, in which case we denote space spanned by the /// basis functions by @f$\{q_k\}@f$, which can be represented by: /// @f[ q_i = b_{ij} p_j. @f] /// This leads to /// @f[ \phi_i = c^{\prime}_{ij} q_j = c^{\prime}_{ij} b_{jk} p_k, @f] /// and in matrix form: /// \f[ /// \phi = C^{\prime} B p /// \f] /// /// If the basis functions span the full space, then @f$ B @f$ is simply /// the identity. /// /// The basis functions @f$\phi_i@f$ are defined by a dual set of functionals /// @f$\{f_i\}@f$. The basis functions are the functions in span{@f$q_k@f$} such /// that /// @f[ f_i(\phi_j) = \delta_{ij} @f] /// and inserting the expression for @f$\phi_{j}@f$: /// @f[ f_i(c^{\prime}_{jk}b_{kl}p_{l}) = c^{\prime}_{jk} b_{kl} f_i \left( /// p_{l} \right) @f] /// /// Defining a matrix D given by applying the functionals to each /// polynomial @f$p_j@f$: /// @f[ [D] = d_{ij},\mbox{ where } d_{ij} = f_i(p_j), @f] /// we have: /// @f[ C^{\prime} B D^{T} = I @f] /// /// and /// /// @f[ C^{\prime} = (B D^{T})^{-1}. @f] /// /// Recalling that @f$C = C^{\prime} B@f$, where @f$C@f$ is the matrix /// form of @f$c_{ij}@f$, /// /// @f[ C = (B D^{T})^{-1} B @f] /// /// This function takes the matrices B (span_coeffs) and D (dual) as /// inputs and returns the matrix C. /// /// Example: Order 1 Lagrange elements on a triangle /// ------------------------------------------------ /// On a triangle, the scalar expansion basis is: /// @f[ p_0 = \sqrt{2}/2 \qquad /// p_1 = \sqrt{3}(2x + y - 1) \qquad /// p_2 = 3y - 1 @f] /// These span the space @f$P_1@f$. /// /// Lagrange order 1 elements span the space P_1, so in this example, /// B (span_coeffs) is the identity matrix: /// @f[ B = \begin{bmatrix} /// 1 & 0 & 0 \\ /// 0 & 1 & 0 \\ /// 0 & 0 & 1 \end{bmatrix} @f] /// /// The functionals defining the Lagrange order 1 space are point /// evaluations at the three vertices of the triangle. The matrix D /// (dual) given by applying these to p_0 to p_2 is: /// @f[ \mbox{dual} = \begin{bmatrix} /// \sqrt{2}/2 & -\sqrt{3} & -1 \\ /// \sqrt{2}/2 & \sqrt{3} & -1 \\ /// \sqrt{2}/2 & 0 & 2 \end{bmatrix} @f] /// /// For this example, this function outputs the matrix: /// @f[ C = \begin{bmatrix} /// \sqrt{2}/3 & -\sqrt{3}/6 & -1/6 \\ /// \sqrt{2}/3 & \sqrt{3}/6 & -1/6 \\ /// \sqrt{2}/3 & 0 & 1/3 \end{bmatrix} @f] /// The basis functions of the finite element can be obtained by applying /// the matrix C to the vector @f$[p_0, p_1, p_2]@f$, giving: /// @f[ \begin{bmatrix} 1 - x - y \\ x \\ y \end{bmatrix} @f] /// /// Example: Order 1 Raviart-Thomas on a triangle /// --------------------------------------------- /// On a triangle, the 2D vector expansion basis is: /// @f[ \begin{matrix} /// p_0 & = & (\sqrt{2}/2, 0) \\ /// p_1 & = & (\sqrt{3}(2x + y - 1), 0) \\ /// p_2 & = & (3y - 1, 0) \\ /// p_3 & = & (0, \sqrt{2}/2) \\ /// p_4 & = & (0, \sqrt{3}(2x + y - 1)) \\ /// p_5 & = & (0, 3y - 1) /// \end{matrix} /// @f] /// These span the space @f$ P_1^2 @f$. /// /// Raviart-Thomas order 1 elements span a space smaller than @f$ P_1^2 @f$, /// so B (span_coeffs) is not the identity. 
It is given by: /// @f[ B = \begin{bmatrix} /// 1 & 0 & 0 & 0 & 0 & 0 \\ /// 0 & 0 & 0 & 1 & 0 & 0 \\ /// 1/12 & \sqrt{6}/48 & -\sqrt{2}/48 & 1/12 & 0 & \sqrt{2}/24 /// \end{bmatrix} /// @f] /// Applying the matrix B to the vector @f$[p_0, p_1, ..., p_5]@f$ gives the /// basis of the polynomial space for Raviart-Thomas: /// @f[ \begin{bmatrix} /// \sqrt{2}/2 & 0 \\ /// 0 & \sqrt{2}/2 \\ /// \sqrt{2}x/8 & \sqrt{2}y/8 /// \end{bmatrix} @f] /// /// The functionals defining the Raviart-Thomas order 1 space are integral /// of the normal components along each edge. The matrix D (dual) given /// by applying these to @f$p_0@f$ to @f$p_5@f$ is: /// @f[ D = \begin{bmatrix} /// -\sqrt{2}/2 & -\sqrt{3}/2 & -1/2 & -\sqrt{2}/2 & -\sqrt{3}/2 & -1/2 \\ /// -\sqrt{2}/2 & \sqrt{3}/2 & -1/2 & 0 & 0 & 0 \\ /// 0 & 0 & 0 & \sqrt{2}/2 & 0 & -1 /// \end{bmatrix} @f] /// /// In this example, this function outputs the matrix: /// @f[ C = \begin{bmatrix} /// -\sqrt{2}/2 & -\sqrt{3}/2 & -1/2 & -\sqrt{2}/2 & -\sqrt{3}/2 & -1/2 \\ /// -\sqrt{2}/2 & \sqrt{3}/2 & -1/2 & 0 & 0 & 0 \\ /// 0 & 0 & 0 & \sqrt{2}/2 & 0 & -1 /// \end{bmatrix} @f] /// The basis functions of the finite element can be obtained by applying /// the matrix C to the vector @f$[p_0, p_1, ..., p_5]@f$, giving: /// @f[ \begin{bmatrix} /// -x & -y \\ /// x - 1 & y \\ /// -x & 1 - y \end{bmatrix} @f] /// /// @param[in] cell_type The cells shape /// @param[in] B Matrices for the kth value index containing the /// expansion coefficients defining a polynomial basis spanning the /// polynomial space for this element /// @param[in] M The interpolation tensor, such that the dual matrix /// \f$D\f$ is computed by \f$D = MP\f$ /// @param[in] x The interpolation points. The vector index is for /// points on entities of the same dimension, ordered with the lowest /// topological dimension being first. Each 3D tensor hold the points on /// cell entities of a common dimension. The shape of the 3d tensors is /// (num_entities, num_points_per_entity, tdim). /// @param[in] degree The degree of the polynomial basis P used to /// create the element (before applying B) /// @param[in] kappa_tol If positive, the condition number is computed /// and an error thrown if the condition number of \f$B D^{T}\f$ is /// greater than @p kappa_tol. If @p kappa_tol is less than 1 the /// condition number is not checked. /// @return The matrix C of expansion coefficients that define the basis /// functions of the finite element space. The shape is (num_dofs, /// value_size, basis_dim) xt::xtensor compute_expansion_coefficients( cell::type cell_type, const xt::xtensor& B, const std::vector>>& M, const std::vector>>& x, int degree, double kappa_tol = 0.0); /// Finite Element /// The basis is stored as a set of coefficients, which are applied to the /// underlying expansion set for that cell type, when tabulating. class FiniteElement { public: /// @todo Document /// A finite element /// @param[in] family /// @param[in] cell_type /// @param[in] degree /// @param[in] value_shape /// @param[in] coeffs Expansion coefficients. The shape is (num_dofs, /// value_size, basis_dim) /// @param[in] entity_transformations Entity transformations /// @param[in] x Interpolation points. Shape is (tdim, entity index, /// point index, dim) /// @param[in] M The interpolation matrices. 
Indices are (tdim, entity /// index, dof, vs, point_index) /// @param[in] map_type FiniteElement(element::family family, cell::type cell_type, int degree, const std::vector& value_shape, const xt::xtensor& coeffs, const std::map>& entity_transformations, const std::array>, 4>& x, const std::array>, 4>& M, maps::type map_type = maps::type::identity); /// Copy constructor FiniteElement(const FiniteElement& element) = default; /// Move constructor FiniteElement(FiniteElement&& element) = default; /// Destructor ~FiniteElement() = default; /// Assignment operator FiniteElement& operator=(const FiniteElement& element) = default; /// Move assignment operator FiniteElement& operator=(FiniteElement&& element) = default; /// Compute basis values and derivatives at set of points. /// /// @param[in] nd The order of derivatives, up to and including, to /// compute. Use 0 for the basis functions only. /// @param[in] x The points at which to compute the basis functions. /// The shape of x is (number of points, geometric dimension). /// @return The basis functions (and derivatives). The shape is /// (derivative, point, basis fn index, value index). /// - The first index is the derivative, with higher derivatives are /// stored in triangular (2D) or tetrahedral (3D) ordering, i.e. for /// the (x,y) derivatives in 2D: (0,0), (1,0), (0,1), (2,0), (1,1), /// (0,2), (3,0)... The function basix::idx can be used to find the /// appropriate derivative. /// - The second index is the point index /// - The third index is the basis function index /// - The fourth index is the basis function component. Its has size /// one for scalar basis functions. xt::xtensor tabulate(int nd, const xt::xarray& x) const; /// Direct to memory block tabulation /// @param nd Number of derivatives /// @param x Points /// @param basis_data Memory location to fill void tabulate(int nd, const xt::xarray& x, xt::xtensor& basis_data) const; /// Get the element cell type /// @return The cell type cell::type cell_type() const; /// Get the element polynomial degree /// @return Polynomial degree int degree() const; /// Get the element value size /// This is just a convenience function returning product(value_shape) /// @return Value size int value_size() const; /// Get the element value tensor shape, e.g. returning [1] for scalars. /// @return Value shape const std::vector& value_shape() const; /// Dimension of the finite element space (number of degrees of /// freedom for the element) /// @return Number of degrees of freedom int dim() const; /// Get the finite element family /// @return The family element::family family() const; /// Get the mapping type used for this element /// @return The mapping maps::type mapping_type() const; /// Indicates whether the dof transformations are all permutations /// @return True or False bool dof_transformations_are_permutations() const; /// Indicates whether the dof transformations are all the identity /// @return True or False bool dof_transformations_are_identity() const; /// Map function values from the reference to a physical cell. This /// function can perform the mapping for multiple points, grouped by /// points that share a common Jacobian. /// /// @param U The function values on the reference. The indices are /// [Jacobian index, point index, components]. /// @param J The Jacobian of the mapping. The indices are [Jacobian /// index, J_i, J_j]. /// @param detJ The determinant of the Jacobian of the mapping. It has /// length `J.shape(0)` /// @param K The inverse of the Jacobian of the mapping. 
The indices /// are [Jacobian index, K_i, K_j]. /// @return The function values on the cell. The indices are [Jacobian /// index, point index, components]. xt::xtensor map_push_forward(const xt::xtensor& U, const xt::xtensor& J, const xtl::span& detJ, const xt::xtensor& K) const; /// Direct to memory push forward /// /// @param[in] U Data defined on the reference element. It must have /// dimension 3. The first index is for the geometric/map data, the /// second is the point index for points that share map data, and the /// third index is (vector) component, e.g. `u[i,:,:]` are points that /// are mapped by `J[i,:,:]`. /// @param[in] J The Jacobians. It must have dimension 3. The first /// index is for the ith Jacobian, i.e. J[i,:,:] is the ith Jacobian. /// @param[in] detJ The determinant of J. `detJ[i]` is equal to /// `det(J[i,:,:])`. It must have dimension 1. @param[in] K The /// inverse of J, `K[i,:,:] = J[i,:,:]^-1`. It must /// have dimension 3. /// @param[out] u The input `U` mapped to the physical. It must have /// dimension 3. template void map_push_forward_m(const O& U, const P& J, const Q& detJ, const S& K, T&& u) const { // FIXME: Should U.shape(2) be replaced by the physical value size? // Can it differ? // Loop over points that share J for (std::size_t i = 0; i < U.shape(0); ++i) { auto _J = xt::view(J, i, xt::all(), xt::all()); auto _K = xt::view(K, i, xt::all(), xt::all()); auto _U = xt::view(U, i, xt::all(), xt::all()); auto _u = xt::view(u, i, xt::all(), xt::all()); maps::apply_map(_u, _U, _J, detJ[i], _K, map_type); } } /// Map function values from a physical cell to the reference /// @param[in] u The function values on the cell /// @param[in] J The Jacobian of the mapping /// @param[in] detJ The determinant of the Jacobian of the mapping /// @param[in] K The inverse of the Jacobian of the mapping /// @return The function values on the reference xt::xtensor map_pull_back(const xt::xtensor& u, const xt::xtensor& J, const xtl::span& detJ, const xt::xtensor& K) const; /// Map function values from a physical cell back to to the reference /// /// @param[in] u Data defined on the physical element. It must have /// dimension 3. The first index is for the geometric/map data, the /// second is the point index for points that share map data, and the /// third index is (vector) component, e.g. `u[i,:,:]` are points that /// are mapped by `J[i,:,:]`. /// @param[in] J The Jacobians. It must have dimension 3. The first /// index is for the ith Jacobian, i.e. J[i,:,:] is the ith Jacobian. /// @param[in] detJ The determinant of J. `detJ[i]` is equal to /// `det(J[i,:,:])`. It must have dimension 1. @param[in] K The /// inverse of J, `K[i,:,:] = J[i,:,:]^-1`. It must /// have dimension 3. /// @param[out] U The input `u` mapped to the reference element. It /// must have dimension 3. template void map_pull_back_m(const O& u, const P& J, const Q& detJ, const S& K, T&& U) const { // Loop over points that share K and K for (std::size_t i = 0; i < u.shape(0); ++i) { auto _J = xt::view(J, i, xt::all(), xt::all()); auto _K = xt::view(K, i, xt::all(), xt::all()); auto _u = xt::view(u, i, xt::all(), xt::all()); auto _U = xt::view(U, i, xt::all(), xt::all()); maps::apply_map(_U, _u, _K, 1.0 / detJ[i], _J, map_type); } } /// Get the number of dofs on each topological entity: (vertices, /// edges, faces, cell) in that order. 
For example, Lagrange degree 2 /// on a triangle has vertices: [1, 1, 1], edges: [1, 1, 1], cell: [0] /// The sum of the entity dofs must match the total number of dofs /// reported by FiniteElement::dim, /// @code{.cpp} /// const std::vector>& dofs = e.entity_dofs(); /// int num_dofs0 = dofs[1][3]; // Number of dofs associated with edge 3 /// int num_dofs1 = dofs[2][0]; // Number of dofs associated with face 0 /// @endcode /// @return Number of dofs associated with an entity of a given /// topological dimension. The shape is (tdim + 1, num_entities). const std::vector>& num_entity_dofs() const; /// Get the number of dofs on the closure of each topological entity: /// (vertices, edges, faces, cell) in that order. For example, Lagrange degree /// 2 on a triangle has vertices: [1, 1, 1], edges: [3, 3, 3], cell: [6] /// @return Number of dofs associated with the closure of an entity of a given /// topological dimension. The shape is (tdim + 1, num_entities). const std::vector>& num_entity_closure_dofs() const; /// Get the dofs on each topological entity: (vertices, /// edges, faces, cell) in that order. For example, Lagrange degree 2 /// on a triangle has vertices: [[0], [1], [2]], edges: [[3], [4], [5]], /// cell: [[]] /// @return Dofs associated with an entity of a given /// topological dimension. The shape is (tdim + 1, num_entities, num_dofs). const std::vector>>& entity_dofs() const; /// Get the dofs on the closure of each topological entity: (vertices, /// edges, faces, cell) in that order. For example, Lagrange degree 2 /// on a triangle has vertices: [[0], [1], [2]], edges: [[1, 2, 3], [0, 2, 4], /// [0, 1, 5]], cell: [[0, 1, 2, 3, 4, 5]] /// @return Dofs associated with the closre of an entity of a given /// topological dimension. The shape is (tdim + 1, num_entities, num_dofs). const std::vector>>& entity_closure_dofs() const; /// Get the base transformations /// The base transformations represent the effect of rotating or reflecting /// a subentity of the cell on the numbering and orientation of the DOFs. /// This returns a list of matrices with one matrix for each subentity /// permutation in the following order: /// Reversing edge 0, reversing edge 1, ... /// Rotate face 0, reflect face 0, rotate face 1, reflect face 1, ... /// /// Example: Order 3 Lagrange on a triangle /// --------------------------------------- /// This space has 10 dofs arranged like: /// ~~~~~~~~~~~~~~~~ /// 2 /// |\ /// 6 4 /// | \ /// 5 9 3 /// | \ /// 0-7-8-1 /// ~~~~~~~~~~~~~~~~ /// For this element, the base transformations are: /// [Matrix swapping 3 and 4, /// Matrix swapping 5 and 6, /// Matrix swapping 7 and 8] /// The first row shows the effect of reversing the diagonal edge. The /// second row shows the effect of reversing the vertical edge. The third /// row shows the effect of reversing the horizontal edge. /// /// Example: Order 1 Raviart-Thomas on a triangle /// --------------------------------------------- /// This space has 3 dofs arranged like: /// ~~~~~~~~~~~~~~~~ /// |\ /// | \ /// | \ /// <-1 0 /// | / \ /// | L ^ \ /// | | \ /// ---2--- /// ~~~~~~~~~~~~~~~~ /// These DOFs are integrals of normal components over the edges: DOFs 0 and 2 /// are oriented inward, DOF 1 is oriented outwards. 
/// For this element, the base transformation matrices are: /// ~~~~~~~~~~~~~~~~ /// 0: [[-1, 0, 0], /// [ 0, 1, 0], /// [ 0, 0, 1]] /// 1: [[1, 0, 0], /// [0, -1, 0], /// [0, 0, 1]] /// 2: [[1, 0, 0], /// [0, 1, 0], /// [0, 0, -1]] /// ~~~~~~~~~~~~~~~~ /// The first matrix reverses DOF 0 (as this is on the first edge). The second /// matrix reverses DOF 1 (as this is on the second edge). The third matrix /// reverses DOF 2 (as this is on the third edge). /// /// Example: DOFs on the face of Order 2 Nedelec first kind on a tetrahedron /// ------------------------------------------------------------------------ /// On a face of this tetrahedron, this space has two face tangent DOFs: /// ~~~~~~~~~~~~~~~~ /// |\ |\ /// | \ | \ /// | \ | ^\ /// | \ | | \ /// | 0->\ | 1 \ /// | \ | \ /// ------ ------ /// ~~~~~~~~~~~~~~~~ /// For these DOFs, the subblocks of the base transformation matrices are: /// ~~~~~~~~~~~~~~~~ /// rotation: [[-1, 1], /// [ 1, 0]] /// reflection: [[0, 1], /// [1, 0]] /// ~~~~~~~~~~~~~~~~ xt::xtensor base_transformations() const; /// Return the entity dof transformation matricess std::map> entity_transformations() const; /// Permute the dof numbering on a cell /// @param[in,out] dofs The dof numbering for the cell /// @param cell_info The permutation info for the cell void permute_dofs(const xtl::span& dofs, std::uint32_t cell_info) const; /// Unpermute the dof numbering on a cell /// @param[in,out] dofs The dof numbering for the cell /// @param cell_info The permutation info for the cell void unpermute_dofs(const xtl::span& dofs, std::uint32_t cell_info) const; /// Apply DOF transformations to some data /// @param[in,out] data The data /// @param block_size The number of data points per DOF /// @param cell_info The permutation info for the cell template void apply_dof_transformation(const xtl::span& data, int block_size, std::uint32_t cell_info) const; /// Apply transpose DOF transformations to some data /// @param[in,out] data The data /// @param block_size The number of data points per DOF /// @param cell_info The permutation info for the cell template void apply_transpose_dof_transformation(const xtl::span& data, int block_size, std::uint32_t cell_info) const; /// Apply inverse transpose DOF transformations to some data /// @param[in,out] data The data /// @param block_size The number of data points per DOF /// @param cell_info The permutation info for the cell template void apply_inverse_transpose_dof_transformation( const xtl::span& data, int block_size, std::uint32_t cell_info) const; /// Apply inverse DOF transformations to some data /// @param[in,out] data The data /// @param block_size The number of data points per DOF /// @param cell_info The permutation info for the cell template void apply_inverse_dof_transformation(const xtl::span& data, int block_size, std::uint32_t cell_info) const; /// Apply DOF transformations to some transposed data /// @param[in,out] data The data /// @param block_size The number of data points per DOF /// @param cell_info The permutation info for the cell template void apply_dof_transformation_to_transpose(const xtl::span& data, int block_size, std::uint32_t cell_info) const; /// Apply transpose DOF transformations to some transposed data /// @param[in,out] data The data /// @param block_size The number of data points per DOF /// @param cell_info The permutation info for the cell template void apply_transpose_dof_transformation_to_transpose( const xtl::span& data, int block_size, std::uint32_t cell_info) const; /// Apply inverse 
transpose DOF transformations to some transposed data /// @param[in,out] data The data /// @param block_size The number of data points per DOF /// @param cell_info The permutation info for the cell template void apply_inverse_transpose_dof_transformation_to_transpose( const xtl::span& data, int block_size, std::uint32_t cell_info) const; /// Apply inverse DOF transformations to some transposed data /// @param[in,out] data The data /// @param block_size The number of data points per DOF /// @param cell_info The permutation info for the cell template void apply_inverse_dof_transformation_to_transpose( const xtl::span& data, int block_size, std::uint32_t cell_info) const; /// Return the interpolation points, i.e. the coordinates on the /// reference element where a function need to be evaluated in order /// to interpolate it in the finite element space. /// @return Array of coordinate with shape `(num_points, tdim)` const xt::xtensor& points() const; /// Return the number of interpolation points int num_points() const; /// Return a matrix of weights interpolation /// To interpolate a function in this finite element, the functions /// should be evaluated at each point given by /// FiniteElement::points(). These function values should then be /// multiplied by the weight matrix to give the coefficients of the /// interpolated function. const xt::xtensor& interpolation_matrix() const; /// Compute the coefficients of a function given the values of the function /// at the interpolation points. /// @param[in,out] coefficients The coefficients of the function's /// interpolation into the function space /// @param[in] data The function evaluated at the points given by `points()` /// @param[in] block_size The block size of the data template void interpolate(const xtl::span& coefficients, const xtl::span& data, const int block_size) const; /// Element map type maps::type map_type; private: // Cell type cell::type _cell_type; // Topological dimension of the cell std::size_t _cell_tdim; // Topological dimension of the cell std::vector> _cell_subentity_types; // Finite element family element::family _family; // Degree int _degree; // Value shape std::vector _value_shape; /// The mapping used to map this element from the reference to a cell maps::type _map_type; // Shape function coefficient of expansion sets on cell. If shape // function is given by @f$\psi_i = \sum_{k} \phi_{k} // \alpha^{i}_{k}@f$, then _coeffs(i, j) = @f$\alpha^i_k@f$. i.e., // _coeffs.row(i) are the expansion coefficients for shape function i // (@f$\psi_{i}@f$). xt::xtensor _coeffs; // Number of dofs associated with each cell (sub-)entity // // The dofs of an element are associated with entities of different // topological dimension (vertices, edges, faces, cells). The dofs are // listed in this order, with vertex dofs first. Each entry is the dof // count on the associated entity, as listed by cell::topology. std::vector> _num_edofs; // Number of dofs associated with the closure of each cell (sub-)entity std::vector> _num_e_closure_dofs; // Dofs associated with each cell (sub-)entity std::vector>> _edofs; // Dofs associated with each cell (sub-)entity std::vector>> _e_closure_dofs; // Entity transformations std::map> _entity_transformations; // Set of points used for point evaluation // Experimental - currently used for an implementation of // "tabulate_dof_coordinates" Most useful for Lagrange. This may change or go // away. 
For non-Lagrange elements, these points will be used in combination // with _interpolation_matrix to perform interpolation xt::xtensor _points; // Interpolation points on the cell. The shape is (entity_dim, num // entities of given dimension, num_points, tdim) std::array>, 4> _x; /// The interpolation weights and points xt::xtensor _matM; /// Interpolation matrices std::array>, 4> _matM_new; /// Indicates whether or not the DOF transformations are all permutations bool _dof_transformations_are_permutations; /// Indicates whether or not the DOF transformations are all identity bool _dof_transformations_are_identity; /// The entity permutations (factorised). This will only be set if /// _dof_transformations_are_permutations is True and /// _dof_transformations_are_identity is False std::map>> _eperm; /// The reverse entity permutations (factorised). This will only be set if /// _dof_transformations_are_permutations is True and /// _dof_transformations_are_identity is False std::map>> _eperm_rev; /// The entity transformations in precomputed form std::map, std::vector, xt::xtensor>>> _etrans; /// The transposed entity transformations in precomputed form std::map, std::vector, xt::xtensor>>> _etransT; /// The inverse entity transformations in precomputed form std::map, std::vector, xt::xtensor>>> _etrans_inv; /// The inverse transpose entity transformations in precomputed form std::map, std::vector, xt::xtensor>>> _etrans_invT; }; /// Create an element using a given lattice type /// @param[in] family The element family /// @param[in] cell The reference cell type that the element is defined on /// @param[in] degree The degree of the element /// @param[in] lattice_type The lattice type that should be used to arrange DOF /// points of the element FiniteElement create_element(element::family family, cell::type cell, int degree, lattice::type lattice_type); /// Create an element /// @param[in] family The element family /// @param[in] cell The reference cell type that the element is defined on /// @param[in] degree The degree of the element FiniteElement create_element(element::family family, cell::type cell, int degree); /// Return the version number of basix across projects /// @return version string std::string version(); //----------------------------------------------------------------------------- template void FiniteElement::apply_dof_transformation(const xtl::span& data, int block_size, std::uint32_t cell_info) const { if (_dof_transformations_are_identity) return; if (_cell_tdim >= 2) { // This assumes 3 bits are used per face. This will need updating if // 3D cells with faces with more than 4 sides are implemented int face_start = _cell_tdim == 3 ? 
3 * _num_edofs[2].size() : 0; int dofstart = std::accumulate(_num_edofs[0].cbegin(), _num_edofs[0].cend(), 0); // Transform DOFs on edges for (std::size_t e = 0; e < _num_edofs[1].size(); ++e) { // Reverse an edge if (cell_info >> (face_start + e) & 1) precompute::apply_matrix(_etrans.at(cell::type::interval)[0], data, dofstart, block_size); dofstart += _num_edofs[1][e]; } if (_cell_tdim == 3) { // Permute DOFs on faces for (std::size_t f = 0; f < _num_edofs[2].size(); ++f) { // Reflect a face if (cell_info >> (3 * f) & 1) precompute::apply_matrix(_etrans.at(_cell_subentity_types[2][f])[1], data, dofstart, block_size); // Rotate a face for (std::uint32_t r = 0; r < (cell_info >> (3 * f + 1) & 3); ++r) precompute::apply_matrix(_etrans.at(_cell_subentity_types[2][f])[0], data, dofstart, block_size); dofstart += _num_edofs[2][f]; } } } } //----------------------------------------------------------------------------- template void FiniteElement::apply_transpose_dof_transformation( const xtl::span& data, int block_size, std::uint32_t cell_info) const { if (_dof_transformations_are_identity) return; if (_cell_tdim >= 2) { // This assumes 3 bits are used per face. This will need updating if // 3D cells with faces with more than 4 sides are implemented int face_start = _cell_tdim == 3 ? 3 * _num_edofs[2].size() : 0; int dofstart = std::accumulate(_num_edofs[0].cbegin(), _num_edofs[0].cend(), 0); // Transform DOFs on edges for (std::size_t e = 0; e < _num_edofs[1].size(); ++e) { // Reverse an edge if (cell_info >> (face_start + e) & 1) precompute::apply_matrix(_etransT.at(cell::type::interval)[0], data, dofstart, block_size); dofstart += _num_edofs[1][e]; } if (_cell_tdim == 3) { // Permute DOFs on faces for (std::size_t f = 0; f < _num_edofs[2].size(); ++f) { // Rotate a face for (std::uint32_t r = 0; r < (cell_info >> (3 * f + 1) & 3); ++r) precompute::apply_matrix(_etransT.at(_cell_subentity_types[2][f])[0], data, dofstart, block_size); // Reflect a face if (cell_info >> (3 * f) & 1) precompute::apply_matrix(_etransT.at(_cell_subentity_types[2][f])[1], data, dofstart, block_size); dofstart += _num_edofs[2][f]; } } } } //----------------------------------------------------------------------------- template void FiniteElement::apply_inverse_transpose_dof_transformation( const xtl::span& data, int block_size, std::uint32_t cell_info) const { if (_dof_transformations_are_identity) return; if (_cell_tdim >= 2) { // This assumes 3 bits are used per face. This will need updating if 3D // cells with faces with more than 4 sides are implemented int face_start = _cell_tdim == 3 ? 
3 * _num_edofs[2].size() : 0; int dofstart = std::accumulate(_num_edofs[0].cbegin(), _num_edofs[0].cend(), 0); // Transform DOFs on edges for (std::size_t e = 0; e < _num_edofs[1].size(); ++e) { // Reverse an edge if (cell_info >> (face_start + e) & 1) precompute::apply_matrix(_etrans_invT.at(cell::type::interval)[0], data, dofstart, block_size); dofstart += _num_edofs[1][e]; } if (_cell_tdim == 3) { // Permute DOFs on faces for (std::size_t f = 0; f < _num_edofs[2].size(); ++f) { // Reflect a face if (cell_info >> (3 * f) & 1) precompute::apply_matrix( _etrans_invT.at(_cell_subentity_types[2][f])[1], data, dofstart, block_size); // Rotate a face for (std::uint32_t r = 0; r < (cell_info >> (3 * f + 1) & 3); ++r) precompute::apply_matrix( _etrans_invT.at(_cell_subentity_types[2][f])[0], data, dofstart, block_size); dofstart += _num_edofs[2][f]; } } } } //----------------------------------------------------------------------------- template void FiniteElement::apply_inverse_dof_transformation( const xtl::span& data, int block_size, std::uint32_t cell_info) const { if (_dof_transformations_are_identity) return; if (_cell_tdim >= 2) { // This assumes 3 bits are used per face. This will need updating if 3D // cells with faces with more than 4 sides are implemented int face_start = _cell_tdim == 3 ? 3 * _num_edofs[2].size() : 0; int dofstart = std::accumulate(_num_edofs[0].cbegin(), _num_edofs[0].cend(), 0); // Transform DOFs on edges for (std::size_t e = 0; e < _num_edofs[1].size(); ++e) { // Reverse an edge if (cell_info >> (face_start + e) & 1) precompute::apply_matrix(_etrans_inv.at(cell::type::interval)[0], data, dofstart, block_size); dofstart += _num_edofs[1][e]; } if (_cell_tdim == 3) { // Permute DOFs on faces for (std::size_t f = 0; f < _num_edofs[2].size(); ++f) { // Rotate a face for (std::uint32_t r = 0; r < (cell_info >> (3 * f + 1) & 3); ++r) precompute::apply_matrix( _etrans_inv.at(_cell_subentity_types[2][f])[0], data, dofstart, block_size); // Reflect a face if (cell_info >> (3 * f) & 1) precompute::apply_matrix( _etrans_inv.at(_cell_subentity_types[2][f])[1], data, dofstart, block_size); dofstart += _num_edofs[2][f]; } } } } //----------------------------------------------------------------------------- template void FiniteElement::apply_dof_transformation_to_transpose( const xtl::span& data, int block_size, std::uint32_t cell_info) const { if (_dof_transformations_are_identity) return; if (_cell_tdim >= 2) { // This assumes 3 bits are used per face. This will need updating if // 3D cells with faces with more than 4 sides are implemented int face_start = _cell_tdim == 3 ? 
3 * _num_edofs[2].size() : 0; int dofstart = std::accumulate(_num_edofs[0].cbegin(), _num_edofs[0].cend(), 0); // Transform DOFs on edges for (std::size_t e = 0; e < _num_edofs[1].size(); ++e) { // Reverse an edge if (cell_info >> (face_start + e) & 1) precompute::apply_matrix_to_transpose( _etrans.at(cell::type::interval)[0], data, dofstart, block_size); dofstart += _num_edofs[1][e]; } if (_cell_tdim == 3) { // Permute DOFs on faces for (std::size_t f = 0; f < _num_edofs[2].size(); ++f) { // Reflect a face if (cell_info >> (3 * f) & 1) precompute::apply_matrix_to_transpose( _etrans.at(_cell_subentity_types[2][f])[1], data, dofstart, block_size); // Rotate a face for (std::uint32_t r = 0; r < (cell_info >> (3 * f + 1) & 3); ++r) precompute::apply_matrix_to_transpose( _etrans.at(_cell_subentity_types[2][f])[0], data, dofstart, block_size); dofstart += _num_edofs[2][f]; } } } } //----------------------------------------------------------------------------- template void FiniteElement::apply_inverse_transpose_dof_transformation_to_transpose( const xtl::span& data, int block_size, std::uint32_t cell_info) const { if (_dof_transformations_are_identity) return; if (_cell_tdim >= 2) { // This assumes 3 bits are used per face. This will need updating if // 3D cells with faces with more than 4 sides are implemented int face_start = _cell_tdim == 3 ? 3 * _num_edofs[2].size() : 0; int dofstart = std::accumulate(_num_edofs[0].cbegin(), _num_edofs[0].cend(), 0); // Transform DOFs on edges for (std::size_t e = 0; e < _num_edofs[1].size(); ++e) { // Reverse an edge if (cell_info >> (face_start + e) & 1) precompute::apply_matrix_to_transpose( _etrans_invT.at(cell::type::interval)[0], data, dofstart, block_size); dofstart += _num_edofs[1][e]; } if (_cell_tdim == 3) { // Permute DOFs on faces for (std::size_t f = 0; f < _num_edofs[2].size(); ++f) { // Reflect a face if (cell_info >> (3 * f) & 1) precompute::apply_matrix_to_transpose( _etrans_invT.at(_cell_subentity_types[2][f])[1], data, dofstart, block_size); // Rotate a face for (std::uint32_t r = 0; r < (cell_info >> (3 * f + 1) & 3); ++r) precompute::apply_matrix_to_transpose( _etrans_invT.at(_cell_subentity_types[2][f])[0], data, dofstart, block_size); dofstart += _num_edofs[2][f]; } } } } //----------------------------------------------------------------------------- template void FiniteElement::apply_transpose_dof_transformation_to_transpose( const xtl::span& data, int block_size, std::uint32_t cell_info) const { if (_dof_transformations_are_identity) return; if (_cell_tdim >= 2) { // This assumes 3 bits are used per face. This will need updating if // 3D cells with faces with more than 4 sides are implemented int face_start = _cell_tdim == 3 ? 
3 * _num_edofs[2].size() : 0; int dofstart = std::accumulate(_num_edofs[0].cbegin(), _num_edofs[0].cend(), 0); // Transform DOFs on edges for (std::size_t e = 0; e < _num_edofs[1].size(); ++e) { // Reverse an edge if (cell_info >> (face_start + e) & 1) precompute::apply_matrix_to_transpose( _etransT.at(cell::type::interval)[0], data, dofstart, block_size); dofstart += _num_edofs[1][e]; } if (_cell_tdim == 3) { // Permute DOFs on faces for (std::size_t f = 0; f < _num_edofs[2].size(); ++f) { // Rotate a face for (std::uint32_t r = 0; r < (cell_info >> (3 * f + 1) & 3); ++r) precompute::apply_matrix_to_transpose( _etransT.at(_cell_subentity_types[2][f])[0], data, dofstart, block_size); // Reflect a face if (cell_info >> (3 * f) & 1) precompute::apply_matrix_to_transpose( _etransT.at(_cell_subentity_types[2][f])[1], data, dofstart, block_size); dofstart += _num_edofs[2][f]; } } } } //----------------------------------------------------------------------------- template void FiniteElement::apply_inverse_dof_transformation_to_transpose( const xtl::span& data, int block_size, std::uint32_t cell_info) const { if (_dof_transformations_are_identity) return; if (_cell_tdim >= 2) { // This assumes 3 bits are used per face. This will need updating if // 3D cells with faces with more than 4 sides are implemented int face_start = _cell_tdim == 3 ? 3 * _num_edofs[2].size() : 0; int dofstart = std::accumulate(_num_edofs[0].cbegin(), _num_edofs[0].cend(), 0); // Transform DOFs on edges for (std::size_t e = 0; e < _num_edofs[1].size(); ++e) { // Reverse an edge if (cell_info >> (face_start + e) & 1) precompute::apply_matrix_to_transpose( _etrans_inv.at(cell::type::interval)[0], data, dofstart, block_size); dofstart += _num_edofs[1][e]; } if (_cell_tdim == 3) { // Permute DOFs on faces for (std::size_t f = 0; f < _num_edofs[2].size(); ++f) { // Rotate a face for (std::uint32_t r = 0; r < (cell_info >> (3 * f + 1) & 3); ++r) precompute::apply_matrix_to_transpose( _etrans_inv.at(_cell_subentity_types[2][f])[0], data, dofstart, block_size); // Reflect a face if (cell_info >> (3 * f) & 1) precompute::apply_matrix_to_transpose( _etrans_inv.at(_cell_subentity_types[2][f])[1], data, dofstart, block_size); dofstart += _num_edofs[2][f]; } } } } //----------------------------------------------------------------------------- template void FiniteElement::interpolate(const xtl::span& coefficients, const xtl::span& data, const int block_size) const { if (block_size != 1) { throw std::runtime_error( "Interpolation of blocked data not implemented yet."); } const std::size_t rows = dim(); // Compute coefficients = Pi * x (matrix-vector multiply) const xt::xtensor& Pi = interpolation_matrix(); assert(Pi.size() % rows == 0); const std::size_t cols = Pi.size() / rows; for (std::size_t i = 0; i < rows; ++i) { // Can be replaced with std::transform_reduce once GCC 8 series dies. 
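// A hedged sketch of that std::transform_reduce replacement (assuming a
// C++17 <numeric> that provides the iterator overload):
//   coefficients[i] = std::transform_reduce(
//       std::next(Pi.data(), i * cols),
//       std::next(Pi.data(), i * cols + cols), data.data(), T(0.0));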
// Dot product between row i of the matrix and 'data' coefficients[i] = std::inner_product(std::next(Pi.data(), i * cols), std::next(Pi.data(), i * cols + cols), data.data(), T(0.0)); } } //----------------------------------------------------------------------------- } // namespace basix basix-0.3.0/cpp/basix/indexing.h000066400000000000000000000017611411115224000164620ustar00rootroot00000000000000// Copyright (c) 2020 Chris Richardson // FEniCS Project // SPDX-License-Identifier: MIT #pragma once namespace basix { /// Compute trivial indexing in a 1D array (for completeness) /// @param p Index in x /// @return 1D Index constexpr int idx(int p) { return p; } /// Compute indexing in a 2D triangular array compressed into a 1D array. /// This can be used to find the index of a derivative returned by /// `FiniteElement::tabulate`. For instance to find d2N/dx2, use /// `FiniteElement::tabulate(2, points)[idx(2, 0)];` /// @param p Index in x /// @param q Index in y /// @return 1D Index constexpr int idx(int p, int q) { return (p + q + 1) * (p + q) / 2 + q; } /// Compute indexing in a 3D tetrahedral array compressed into a 1D array /// @param p Index in x /// @param q Index in y /// @param r Index in z /// @return 1D Index constexpr int idx(int p, int q, int r) { return (p + q + r) * (p + q + r + 1) * (p + q + r + 2) / 6 + (q + r) * (q + r + 1) / 2 + r; } } // namespace basix basix-0.3.0/cpp/basix/lagrange.cpp000066400000000000000000000235141411115224000167700ustar00rootroot00000000000000// Copyright (c) 2020 Chris Richardson & Matthew Scroggs // FEniCS Project // SPDX-License-Identifier: MIT #include "lagrange.h" #include "dof-transformations.h" #include "element-families.h" #include "log.h" #include "maps.h" #include "polyset.h" #include "quadrature.h" #include #include #include #include using namespace basix; //---------------------------------------------------------------------------- FiniteElement basix::create_lagrange(cell::type celltype, int degree, lattice::type lattice_type) { if (celltype == cell::type::point) throw std::runtime_error("Invalid celltype"); const std::size_t tdim = cell::topological_dimension(celltype); const std::size_t ndofs = polyset::dim(celltype, degree); const std::vector>> topology = cell::topology(celltype); std::array>, 4> M; std::array>, 4> x; // Create points at nodes, ordered by topology (vertices first) if (degree == 0) { auto pt = lattice::create(celltype, 0, lattice_type, true); x[tdim].push_back(pt); const std::size_t num_dofs = pt.shape(0); std::array s = {num_dofs, 1, num_dofs}; M[tdim].push_back(xt::xtensor(s)); xt::view(M[tdim][0], xt::all(), 0, xt::all()) = xt::eye(num_dofs); } else { for (std::size_t dim = 0; dim < topology.size(); ++dim) { M[dim].resize(topology[dim].size()); x[dim].resize(topology[dim].size()); // Loop over entities of dimension 'dim' for (std::size_t e = 0; e < topology[dim].size(); ++e) { const xt::xtensor entity_x = cell::sub_entity_geometry(celltype, dim, e); if (dim == 0) { x[dim][e] = entity_x; const std::size_t num_dofs = entity_x.shape(0); M[dim][e] = xt::xtensor( {num_dofs, static_cast(1), num_dofs}); xt::view(M[dim][e], xt::all(), 0, xt::all()) = xt::eye(num_dofs); } else if (dim == tdim) { x[dim][e] = lattice::create(celltype, degree, lattice_type, false); const std::size_t num_dofs = x[dim][e].shape(0); std::array s = {num_dofs, 1, num_dofs}; M[dim][e] = xt::xtensor(s); xt::view(M[dim][e], xt::all(), 0, xt::all()) = xt::eye(num_dofs); } else { cell::type ct = cell::sub_entity_type(celltype, dim, e); const auto lattice 
= lattice::create(ct, degree, lattice_type, false); const std::size_t num_dofs = lattice.shape(0); std::array s = {num_dofs, 1, num_dofs}; M[dim][e] = xt::xtensor(s); xt::view(M[dim][e], xt::all(), 0, xt::all()) = xt::eye(num_dofs); auto x0s = xt::reshape_view( xt::row(entity_x, 0), {static_cast(1), entity_x.shape(1)}); x[dim][e] = xt::tile(x0s, lattice.shape(0)); auto x0 = xt::row(entity_x, 0); for (std::size_t j = 0; j < lattice.shape(0); ++j) { for (std::size_t k = 0; k < lattice.shape(1); ++k) { xt::row(x[dim][e], j) += (xt::row(entity_x, k + 1) - x0) * lattice(j, k); } } } } } } std::map> entity_transformations; if (tdim > 1) { const std::vector edge_ref = doftransforms::interval_reflection(degree - 1); const std::array shape = {1, edge_ref.size(), edge_ref.size()}; xt::xtensor et = xt::zeros(shape); for (std::size_t i = 0; i < edge_ref.size(); ++i) et(0, i, edge_ref[i]) = 1; entity_transformations[cell::type::interval] = et; } if (celltype == cell::type::tetrahedron or celltype == cell::type::prism or celltype == cell::type::pyramid) { const std::vector face_rot = doftransforms::triangle_rotation(degree - 2); const std::vector face_ref = doftransforms::triangle_reflection(degree - 2); const std::array shape = {2, face_rot.size(), face_rot.size()}; xt::xtensor ft = xt::zeros(shape); for (std::size_t i = 0; i < face_rot.size(); ++i) { ft(0, i, face_rot[i]) = 1; ft(1, i, face_ref[i]) = 1; } entity_transformations[cell::type::triangle] = ft; } if (celltype == cell::type::hexahedron or celltype == cell::type::prism or celltype == cell::type::pyramid) { const std::vector face_rot = doftransforms::quadrilateral_rotation(degree - 1); const std::vector face_ref = doftransforms::quadrilateral_reflection(degree - 1); const std::array shape = {2, face_rot.size(), face_rot.size()}; xt::xtensor ft = xt::zeros(shape); for (std::size_t i = 0; i < face_rot.size(); ++i) { ft(0, i, face_rot[i]) = 1; ft(1, i, face_ref[i]) = 1; } entity_transformations[cell::type::quadrilateral] = ft; } xt::xtensor coeffs = compute_expansion_coefficients( celltype, xt::eye(ndofs), {M[0], M[1], M[2], M[3]}, {x[0], x[1], x[2], x[3]}, degree); return FiniteElement(element::family::P, celltype, degree, {1}, coeffs, entity_transformations, x, M, maps::type::identity); } //----------------------------------------------------------------------------- FiniteElement basix::create_dlagrange(cell::type celltype, int degree) { // Only tabulate for scalar. Vector spaces can easily be built from // the scalar space. 
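  // Usage sketch (hedged; the triangle/degree-2 choice is only illustrative):
  //   auto dP2 = basix::create_dlagrange(basix::cell::type::triangle, 2);
  // returns the scalar discontinuous P2 space, with every DOF associated with
  // the cell interior.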
const std::size_t ndofs = polyset::dim(celltype, degree); const std::vector>> topology = cell::topology(celltype); const std::size_t tdim = topology.size() - 1; std::array>, 4> M; M[tdim].push_back(xt::xtensor({ndofs, 1, ndofs})); xt::view(M[tdim][0], xt::all(), 0, xt::all()) = xt::eye(ndofs); const auto pt = lattice::create(celltype, degree, lattice::type::equispaced, true); std::array>, 4> x; x[tdim].push_back(pt); std::map> entity_transformations; if (tdim > 1) { entity_transformations[cell::type::interval] = xt::xtensor({1, 0, 0}); } if (celltype == cell::type::tetrahedron or celltype == cell::type::prism or celltype == cell::type::pyramid) { entity_transformations[cell::type::triangle] = xt::xtensor({2, 0, 0}); } if (celltype == cell::type::hexahedron or celltype == cell::type::prism or celltype == cell::type::pyramid) { entity_transformations[cell::type::quadrilateral] = xt::xtensor({2, 0, 0}); } xt::xtensor coeffs = compute_expansion_coefficients( celltype, xt::eye(ndofs), {M[tdim]}, {x[tdim]}, degree); return FiniteElement(element::family::DP, celltype, degree, {1}, coeffs, entity_transformations, x, M, maps::type::identity); } //----------------------------------------------------------------------------- FiniteElement basix::create_dpc(cell::type celltype, int degree) { // Only tabulate for scalar. Vector spaces can easily be built from // the scalar space. cell::type simplex_type; switch (celltype) { case cell::type::interval: simplex_type = cell::type::interval; break; case cell::type::quadrilateral: simplex_type = cell::type::triangle; break; case cell::type::hexahedron: simplex_type = cell::type::tetrahedron; break; default: throw std::runtime_error("Invalid cell type"); } const std::size_t ndofs = polyset::dim(simplex_type, degree); const std::size_t psize = polyset::dim(celltype, degree); auto [pts, _wts] = quadrature::make_quadrature("default", celltype, 2 * degree); auto wts = xt::adapt(_wts); xt::xtensor psi_quad = xt::view( polyset::tabulate(celltype, degree, 0, pts), 0, xt::all(), xt::all()); xt::xtensor psi = xt::view( polyset::tabulate(simplex_type, degree, 0, pts), 0, xt::all(), xt::all()); // Create coefficients for order (degree-1) vector polynomials xt::xtensor wcoeffs = xt::zeros({ndofs, psize}); for (std::size_t i = 0; i < ndofs; ++i) { auto p_i = xt::col(psi, i); for (std::size_t k = 0; k < psize; ++k) wcoeffs(i, k) = xt::sum(wts * p_i * xt::col(psi_quad, k))(); } const std::vector>> topology = cell::topology(celltype); const std::size_t tdim = topology.size() - 1; std::array>, 4> M; M[tdim].push_back(xt::xtensor({ndofs, 1, ndofs})); xt::view(M[tdim][0], xt::all(), 0, xt::all()) = xt::eye(ndofs); const auto pt = lattice::create(simplex_type, degree, lattice::type::equispaced, true); std::array>, 4> x; x[tdim].push_back(pt); std::map> entity_transformations; if (tdim > 1) { entity_transformations[cell::type::interval] = xt::xtensor({1, 0, 0}); } if (tdim == 3) { entity_transformations[cell::type::quadrilateral] = xt::xtensor({2, 0, 0}); } xt::xtensor coeffs = compute_expansion_coefficients( celltype, wcoeffs, {M[tdim]}, {x[tdim]}, degree); return FiniteElement(element::family::DPC, celltype, degree, {1}, coeffs, entity_transformations, x, M, maps::type::identity); } //----------------------------------------------------------------------------- basix-0.3.0/cpp/basix/lagrange.h000066400000000000000000000022721411115224000164330ustar00rootroot00000000000000// Copyright (c) 2020 Chris Richardson // FEniCS Project // SPDX-License-Identifier: MIT #pragma once 
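// Usage sketch (hedged): assuming the headers are installed under a basix/
// include prefix and the library is linked, a degree-2 Lagrange element on a
// triangle with equispaced points can be created with
//
//   #include <basix/lagrange.h>
//
//   basix::FiniteElement P2 = basix::create_lagrange(
//       basix::cell::type::triangle, 2, basix::lattice::type::equispaced);
//
// and its discontinuous counterpart with
//
//   basix::FiniteElement dP2
//       = basix::create_dlagrange(basix::cell::type::triangle, 2);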
#include "cell.h" #include "finite-element.h" #include "lattice.h" namespace basix { /// Create a Lagrange element on cell with given degree /// @param[in] celltype The reference cell type that the element is defined on /// @param[in] degree The degree of the element /// @param[in] lattice_type The lattice type that should be used to arrange DOF /// points of the element /// @return A FiniteElement FiniteElement create_lagrange(cell::type celltype, int degree, lattice::type lattice_type); /// Create a Discontinuous Lagrange element on cell with given degree /// @param[in] celltype The reference cell type that the element is defined on /// @param[in] degree The degree of the element /// @return A FiniteElement FiniteElement create_dlagrange(cell::type celltype, int degree); /// Create a DPC element on cell with given degree /// @param[in] celltype The reference cell type that the element is defined on /// @param[in] degree The degree of the element /// @return A FiniteElement FiniteElement create_dpc(cell::type celltype, int degree); } // namespace basix basix-0.3.0/cpp/basix/lattice.cpp000066400000000000000000000322501411115224000166320ustar00rootroot00000000000000// Copyright (c) 2020 Chris Richardson and Garth N. Wells // FEniCS Project // SPDX-License-Identifier: MIT #include "lattice.h" #include "cell.h" #include "lagrange.h" #include "quadrature.h" #include #include #include #include #include using namespace basix; namespace { //----------------------------------------------------------------------------- xt::xtensor warp_function(int n, const xt::xtensor& x) { [[maybe_unused]] auto [_pts, wts] = quadrature::compute_gll_rule(n + 1); _pts *= 0.5; for (int i = 0; i < n + 1; ++i) _pts[i] += (0.5 - static_cast(i) / static_cast(n)); std::array shape0 = {(std::size_t)_pts.size()}; xt::xtensor pts = xt::adapt(_pts.data(), _pts.size(), xt::no_ownership(), shape0); FiniteElement L = create_dlagrange(cell::type::interval, n); xt::xtensor v = xt::view(L.tabulate(0, x), 0, xt::all(), xt::all(), 0); return xt::linalg::dot(v, pts); } //----------------------------------------------------------------------------- xt::xtensor create_interval(int n, lattice::type lattice_type, bool exterior) { if (n == 0) return {0.5}; xt::xtensor x; if (exterior) x = xt::linspace(0.0, 1.0, n + 1); else { const double h = 1.0 / static_cast(n); x = xt::linspace(h, 1.0 - h, n - 1); } if (x.shape(0) > 0 and lattice_type == lattice::type::gll) x += warp_function(n, x); return x; } //----------------------------------------------------------------------------- xt::xtensor create_quad(int n, lattice::type lattice_type, bool exterior) { if (n == 0) return {{0.5, 0.5}}; xt::xtensor r; if (exterior) r = xt::linspace(0.0, 1.0, n + 1); else { const double h = 1.0 / static_cast(n); r = xt::linspace(h, 1.0 - h, n - 1); } if (r.shape(0) > 0 and lattice_type == lattice::type::gll) r += warp_function(n, r); const std::size_t m = r.shape(0); xt::xtensor x({m * m, 2}); std::size_t c = 0; for (std::size_t j = 0; j < m; ++j) { for (std::size_t i = 0; i < m; ++i) { x(c, 0) = r(i); x(c, 1) = r(j); c++; } } return x; } //----------------------------------------------------------------------------- xt::xtensor create_hex(int n, lattice::type lattice_type, bool exterior) { if (n == 0) return {{0.5, 0.5, 0.5}}; xt::xtensor r; if (exterior) r = xt::linspace(0.0, 1.0, n + 1); else { const double h = 1.0 / static_cast(n); r = xt::linspace(h, 1.0 - h, n - 1); } if (r.shape(0) > 0 and lattice_type == lattice::type::gll) r += warp_function(n, r); 
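  // The hexahedron lattice is the triple tensor product of the 1D points r:
  // point c below has coordinates (r[i], r[j], r[k]), with i varying fastest.
  // For example, n = 1 with exterior = true gives the eight unit-cube
  // vertices.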
const std::size_t m = r.size(); xt::xtensor x({m * m * m, 3}); int c = 0; for (std::size_t k = 0; k < m; ++k) { for (std::size_t j = 0; j < m; ++j) { for (std::size_t i = 0; i < m; ++i) { x(c, 0) = r[i]; x(c, 1) = r[j]; x(c, 2) = r[k]; c++; } } } return x; } //----------------------------------------------------------------------------- xt::xtensor create_tri(int n, lattice::type lattice_type, bool exterior) { if (n == 0) return {{1.0 / 3.0, 1.0 / 3.0}}; // Warp points: see Hesthaven and Warburton, Nodal Discontinuous // Galerkin Methods, pp. 175-180 const std::size_t b = exterior ? 0 : 1; // Displacement from GLL points in 1D, scaled by 1 /(r * (1 - r)) xt::xtensor r = xt::linspace(0.0, 1.0, 2 * n + 1); xt::xtensor wbar = warp_function(n, r); auto s = xt::view(r, xt::range(1, 2 * n - 1)); xt::view(wbar, xt::range(1, 2 * n - 1)) /= (s * (1 - s)); // Points xt::xtensor p({(n - 3 * b + 1) * (n - 3 * b + 2) / 2, 2}); int c = 0; for (std::size_t j = b; j < (n - b + 1); ++j) { for (std::size_t i = b; i < (n - b + 1 - j); ++i) { const double x = r[2 * i]; const double y = r[2 * j]; p(c, 0) = x; p(c, 1) = y; if (lattice_type == lattice::type::gll) { const std::size_t l = n - j - i; const double a = r[2 * l]; p(c, 0) += x * (a * wbar(n + i - l) + y * wbar(n + i - j)); p(c, 1) += y * (a * wbar(n + j - l) + x * wbar(n + j - i)); } ++c; } } return p; } //----------------------------------------------------------------------------- xt::xtensor create_tet(int n, lattice::type lattice_type, bool exterior) { if (n == 0) return {{0.25, 0.25, 0.25}}; const std::size_t b = exterior ? 0 : 1; xt::xtensor p( {(n - 4 * b + 1) * (n - 4 * b + 2) * (n - 4 * b + 3) / 6, 3}); auto r = xt::linspace(0.0, 1.0, 2 * n + 1); auto wbar = warp_function(n, r); auto s = xt::view(r, xt::range(1, 2 * n - 1)); xt::view(wbar, xt::range(1, 2 * n - 1)) /= s * (1 - s); std::size_t c = 0; for (std::size_t k = b; k < (n - b + 1); ++k) { for (std::size_t j = b; j < (n - b + 1 - k); ++j) { for (std::size_t i = b; i < (n - b + 1 - j - k); ++i) { const std::size_t l = n - k - j - i; const double x = r[2 * i]; const double y = r[2 * j]; const double z = r[2 * k]; const double a = r[2 * l]; p(c, 0) = x; p(c, 1) = y; p(c, 2) = z; if (lattice_type == lattice::type::gll) { const double dx = x * (a * wbar(n + i - l) + y * wbar(n + i - j) + z * wbar(n + i - k)); const double dy = y * (a * wbar(n + j - l) + z * wbar(n + j - k) + x * wbar(n + j - i)); const double dz = z * (a * wbar(n + k - l) + x * wbar(n + k - i) + y * wbar(n + k - j)); p(c, 0) += dx; p(c, 1) += dy; p(c, 2) += dz; } ++c; } } } return p; } //----------------------------------------------------------------------------- xt::xtensor create_prism(int n, lattice::type lattice_type, bool exterior) { if (n == 0) return {{1.0 / 3.0, 1.0 / 3.0, 0.5}}; const xt::xtensor tri_pts = create_tri(n, lattice_type, exterior); const xt::xtensor line_pts = create_interval(n, lattice_type, exterior); xt::xtensor x({tri_pts.shape(0) * line_pts.shape(0), 3}); std::array reps = {line_pts.shape(0), 1}; xt::view(x, xt::all(), xt::range(0, 2)).assign(xt::tile(tri_pts, reps)); for (std::size_t i = 0; i < line_pts.shape(0); ++i) { auto rows = xt::range(i * tri_pts.shape(0), (i + 1) * tri_pts.shape(0)); xt::view(x, rows, 2) = line_pts(i); } return x; } //----------------------------------------------------------------------------- xt::xtensor create_pyramid(int n, lattice::type lattice_type, bool exterior) { if (n == 0) return {{0.4, 0.4, 0.2}}; const double h = 1.0 / static_cast(n); // Interpolate warp 
factor along interval std::pair, std::vector> pw = quadrature::compute_gll_rule(n + 1); xt::xtensor pts = std::get<0>(pw); pts *= 0.5; for (int i = 0; i < n + 1; ++i) pts[i] += (0.5 - static_cast(i) / static_cast(n)); // Get interpolated value at r in range [-1, 1] FiniteElement L = create_dlagrange(cell::type::interval, n); auto w = [&](double r) -> double { xt::xtensor rr = {0.5 * (r + 1.0)}; xt::xtensor v = xt::view(L.tabulate(0, rr), 0, 0, xt::all(), 0); double d = 0.0; for (std::size_t i = 0; i < pts.shape(0); ++i) d += v[i] * pts[i]; return d; // return v.dot(pts); }; const std::size_t b = (exterior == false) ? 1 : 0; n -= b * 3; std::size_t m = (n + 1) * (n + 2) * (2 * n + 3) / 6; xt::xtensor points({m, 3}); int c = 0; for (int k = 0; k < n + 1; ++k) { for (int j = 0; j < n + 1 - k; ++j) { for (int i = 0; i < n + 1 - k; ++i) { double x = h * (i + b); double y = h * (j + b); double z = h * (k + b); if (lattice_type == lattice::type::gll) { // Barycentric coordinates of triangle in x-z plane const double l1 = x; const double l2 = z; const double l3 = 1 - x - z; // Barycentric coordinates of triangle in y-z plane const double l4 = y; const double l5 = z; const double l6 = 1 - y - z; // b1-b6 are the blending factors for each edge double b1, f1, f2; if (std::fabs(l1) < 1e-12) { b1 = 1.0; f1 = 0.0; f2 = 0.0; } else { b1 = 2.0 * l3 / (2.0 * l3 + l1) * 2.0 * l2 / (2.0 * l2 + l1); f1 = l1 / (l1 + l4); f2 = l1 / (l1 + l6); } // r1-r4 are the edge positions for each of the z>0 edges // calculated so that they use the barycentric coordinates of // the triangle, if the point lies on a triangular face. f1-f4 // are face selecting functions, which blend between adjacent // triangular faces const double r1 = (l2 - l3) * f1 + (l5 - l6) * (1 - f1); const double r2 = (l2 - l3) * f2 + (l5 - l4) * (1 - f2); double b2; if (std::fabs(l2) < 1e-12) b2 = 1.0; else b2 = 2.0 * l3 / (2.0 * l3 + l2) * 2.0 * l1 / (2.0 * l1 + l2); double b3, f3, f4; if (std::fabs(l3) < 1e-12) { b3 = 1.0; f3 = 0.0; f4 = 0.0; } else { b3 = 2.0 * l2 / (2.0 * l2 + l3) * 2.0 * l1 / (2.0 * l1 + l3); f3 = l3 / (l3 + l4); f4 = l3 / (l3 + l6); } const double r3 = (l2 - l1) * f3 + (l5 - l6) * (1.0 - f3); const double r4 = (l2 - l1) * f4 + (l5 - l4) * (1.0 - f4); double b4; if (std::fabs(l4) < 1e-12) b4 = 1.0; else b4 = 2 * l6 / (2.0 * l6 + l4) * 2.0 * l5 / (2.0 * l5 + l4); double b5; if (std::fabs(l5) < 1e-12) b5 = 1.0; else b5 = 2.0 * l6 / (2.0 * l6 + l5) * 2.0 * l4 / (2.0 * l4 + l5); double b6; if (std::fabs(l6) < 1e-12) b6 = 1.0; else b6 = 2.0 * l4 / (2.0 * l4 + l6) * 2.0 * l5 / (2.0 * l5 + l6); double dx = -b3 * b4 * w(r3) - b3 * b6 * w(r4) + b2 * w(l1 - l3); double dy = -b1 * b6 * w(r2) - b3 * b6 * w(r4) + b5 * w(l4 - l6); double dz = b1 * b4 * w(r1) + b1 * b6 * w(r2) + b3 * b4 * w(r3) + b3 * b6 * w(r4); x += dx; y += dy; z += dz; } points(c, 0) = x; points(c, 1) = y; points(c, 2) = z; c++; } } } return points; } } // namespace //----------------------------------------------------------------------------- xt::xtensor lattice::create(cell::type celltype, int n, lattice::type type, bool exterior) { switch (celltype) { case cell::type::point: return {{0.0}}; case cell::type::interval: { xt::xtensor x = create_interval(n, type, exterior); std::array s = {x.shape(0), 1}; return xt::reshape_view(x, s); } case cell::type::triangle: return create_tri(n, type, exterior); case cell::type::tetrahedron: return create_tet(n, type, exterior); case cell::type::quadrilateral: return create_quad(n, type, exterior); case cell::type::hexahedron: return 
create_hex(n, type, exterior); case cell::type::prism: return create_prism(n, type, exterior); case cell::type::pyramid: return create_pyramid(n, type, exterior); default: throw std::runtime_error("Unsupported cell for lattice"); } } //----------------------------------------------------------------------------- lattice::type lattice::str_to_type(std::string name) { static const std::map name_to_type = {{"equispaced", lattice::type::equispaced}, {"gll", lattice::type::gll}}; auto it = name_to_type.find(name); if (it == name_to_type.end()) throw std::runtime_error("Can't find name " + name); return it->second; } //----------------------------------------------------------------------------- std::string lattice::type_to_str(lattice::type type) { static const std::map name_to_type = {{lattice::type::equispaced, "equispaced"}, {lattice::type::gll, "gll"}}; auto it = name_to_type.find(type); if (it == name_to_type.end()) throw std::runtime_error("Can't find type"); return it->second; } //----------------------------------------------------------------------------- basix-0.3.0/cpp/basix/lattice.h000066400000000000000000000040721411115224000163000ustar00rootroot00000000000000// Copyright (c) 2020 Chris Richardson & Garth Wells // FEniCS Project // SPDX-License-Identifier: MIT #pragma once #include "cell.h" #include #include namespace basix::lattice { /// The type of point spacing to be used in a lattice. /// lattice::type::equispaced represents equally spaced points /// on an interval and a regularly spaced set of points on other /// shapes. lattice::type:gll represents the GLL (Gauss-Lobatto-Legendre) /// points on an interval. Fot other shapes, the points used are obtained /// by warping an equispaced grid of points, as described in Hesthaven and /// Warburton, Nodal Discontinuous Galerkin Methods, 2008, pp 175-180 /// (https://doi.org/10.1007/978-0-387-72067-8). enum class type { equispaced = 0, gll = 1 }; /// Convert string to a lattice type lattice::type str_to_type(std::string name); // Convert family to string std::string type_to_str(lattice::type type); /// Create a lattice of points on a reference cell /// optionally including the outer surface points /// /// For a given celltype, this creates a set of points on a regular grid /// which covers the cell, e.g. for a quadrilateral, with n=2, the points are: /// [0,0],[0.5,0],[1,0],[0,0.5],[0.5,0.5],[1,0.5],[0,1],[0.5,1],[1,1] /// If the parameter exterior is set to false, the points lying on the external /// boundary are omitted, in this case for a quadrilateral with n=2, the points /// are: [0.5,0.5]. The lattice type can be chosen as "equispaced" or /// "gll". The "gll" lattice has points spaced along each edge at /// the Gauss-Lobatto-Legendre quadrature points. These are the same as /// "equispaced" when n<3. /// /// @param celltype The cell::type /// @param n Size in each direction. There are n+1 points along each edge of the /// cell. 
/// @param type Either lattice::type::equispaced or lattice::type::gll /// @param exterior If set, includes outer boundaries /// @return Set of points xt::xtensor create(cell::type celltype, int n, lattice::type type, bool exterior); } // namespace basix::lattice basix-0.3.0/cpp/basix/log.cpp000066400000000000000000000002041411115224000157600ustar00rootroot00000000000000// Copyright (c) 2019 Chris Richardson // FEniCS Project // SPDX-License-Identifier: MIT #include "log.h" #include "loguru.cpp" basix-0.3.0/cpp/basix/log.h000066400000000000000000000002761411115224000154360ustar00rootroot00000000000000// Copyright (c) 2019 Chris Richardson // FEniCS Project // SPDX-License-Identifier: MIT #pragma once #define LOGURU_WITH_STREAMS 1 #define LOGURU_REPLACE_GLOG 1 #include "loguru.hpp" basix-0.3.0/cpp/basix/loguru.cpp000066400000000000000000001711331411115224000165260ustar00rootroot00000000000000#ifndef _WIN32 // Disable all warnings from gcc/clang: #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wpragmas" #pragma GCC diagnostic ignored "-Wc++98-compat" #pragma GCC diagnostic ignored "-Wc++98-compat-pedantic" #pragma GCC diagnostic ignored "-Wexit-time-destructors" #pragma GCC diagnostic ignored "-Wformat-nonliteral" #pragma GCC diagnostic ignored "-Wglobal-constructors" #pragma GCC diagnostic ignored "-Wgnu-zero-variadic-macro-arguments" #pragma GCC diagnostic ignored "-Wmissing-prototypes" #pragma GCC diagnostic ignored "-Wpadded" #pragma GCC diagnostic ignored "-Wsign-compare" #pragma GCC diagnostic ignored "-Wsign-conversion" #pragma GCC diagnostic ignored "-Wunknown-pragmas" #pragma GCC diagnostic ignored "-Wunused-macros" #pragma GCC diagnostic ignored "-Wzero-as-null-pointer-constant" #else #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable : 4018) #endif // _MSC_VER #endif #include "loguru.hpp" #ifndef LOGURU_HAS_BEEN_IMPLEMENTED #define LOGURU_HAS_BEEN_IMPLEMENTED #define LOGURU_PREAMBLE_WIDTH \ (53 + LOGURU_THREADNAME_WIDTH + LOGURU_FILENAME_WIDTH) #undef min #undef max #include #include #include #include #include #include #include #include #include #include #include #include #if LOGURU_SYSLOG #include #else #define LOG_USER 0 #endif #ifdef _WIN32 #include #define localtime_r(a, b) \ localtime_s(b, a) // No localtime_r with MSVC, but arguments are swapped for // localtime_s #else #include #include // mkdir #include // STDERR_FILENO #endif #ifdef __linux__ #include // PATH_MAX #elif !defined(_WIN32) #include // PATH_MAX #endif #ifndef PATH_MAX #define PATH_MAX 1024 #endif #ifdef __APPLE__ #include "TargetConditionals.h" #endif // TODO: use defined(_POSIX_VERSION) for some of these things? #if defined(_WIN32) || defined(__CYGWIN__) #define LOGURU_PTHREADS 0 #define LOGURU_WINTHREADS 1 #ifndef LOGURU_STACKTRACES #define LOGURU_STACKTRACES 0 #endif #elif defined(__rtems__) || defined(__ANDROID__) || defined(__FreeBSD__) #define LOGURU_PTHREADS 1 #define LOGURU_WINTHREADS 0 #ifndef LOGURU_STACKTRACES #define LOGURU_STACKTRACES 0 #endif #else #define LOGURU_PTHREADS 1 #define LOGURU_WINTHREADS 0 #ifndef LOGURU_STACKTRACES #define LOGURU_STACKTRACES 1 #endif #endif #if LOGURU_STACKTRACES #include // for __cxa_demangle #include // for dladdr #include // for backtrace #endif // LOGURU_STACKTRACES #if LOGURU_PTHREADS #include #if defined(__FreeBSD__) #include #include #elif defined(__OpenBSD__) #include #endif #ifdef __linux__ /* On Linux, the default thread name is the same as the name of the binary. 
Additionally, all new threads inherit the name of the thread it got forked from. For this reason, Loguru use the pthread Thread Local Storage for storing thread names on Linux. */ #ifndef LOGURU_PTLS_NAMES #define LOGURU_PTLS_NAMES 1 #endif #endif #endif #if LOGURU_WINTHREADS #ifndef _WIN32_WINNT #define _WIN32_WINNT 0x0502 #endif #define WIN32_LEAN_AND_MEAN #define NOMINMAX #include #endif #ifndef LOGURU_PTLS_NAMES #define LOGURU_PTLS_NAMES 0 #endif namespace loguru { using namespace std::chrono; #if LOGURU_WITH_FILEABS struct FileAbs { char path[PATH_MAX]; char mode_str[4]; Verbosity verbosity; struct stat st; FILE* fp; bool is_reopening = false; // to prevent recursive call in file_reopen. decltype(steady_clock::now()) last_check_time = steady_clock::now(); }; #else typedef FILE* FileAbs; #endif struct Callback { std::string id; log_handler_t callback; void* user_data; Verbosity verbosity; // Does not change! close_handler_t close; flush_handler_t flush; unsigned indentation; }; using CallbackVec = std::vector; using StringPair = std::pair; using StringPairList = std::vector; const auto s_start_time = steady_clock::now(); Verbosity g_stderr_verbosity = Verbosity_0; bool g_colorlogtostderr = true; unsigned g_flush_interval_ms = 0; bool g_preamble_header = true; bool g_preamble = true; Verbosity g_internal_verbosity = Verbosity_0; // Preamble details bool g_preamble_date = true; bool g_preamble_time = true; bool g_preamble_uptime = true; bool g_preamble_thread = true; bool g_preamble_file = true; bool g_preamble_verbose = true; bool g_preamble_pipe = true; static std::recursive_mutex s_mutex; static Verbosity s_max_out_verbosity = Verbosity_OFF; static std::string s_argv0_filename; static std::string s_arguments; static char s_current_dir[PATH_MAX]; static CallbackVec s_callbacks; static fatal_handler_t s_fatal_handler = nullptr; static verbosity_to_name_t s_verbosity_to_name_callback = nullptr; static name_to_verbosity_t s_name_to_verbosity_callback = nullptr; static StringPairList s_user_stack_cleanups; static bool s_strip_file_path = true; static std::atomic s_stderr_indentation{0}; // For periodic flushing: static std::thread* s_flush_thread = nullptr; static bool s_needs_flushing = false; static SignalOptions s_signal_options = SignalOptions::none(); static const bool s_terminal_has_color = []() { #ifdef _WIN32 #ifndef ENABLE_VIRTUAL_TERMINAL_PROCESSING #define ENABLE_VIRTUAL_TERMINAL_PROCESSING 0x0004 #endif HANDLE hOut = GetStdHandle(STD_OUTPUT_HANDLE); if (hOut != INVALID_HANDLE_VALUE) { DWORD dwMode = 0; GetConsoleMode(hOut, &dwMode); dwMode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING; return SetConsoleMode(hOut, dwMode) != 0; } return false; #else if (!isatty(STDERR_FILENO)) { return false; } if (const char* term = getenv("TERM")) { return 0 == strcmp(term, "cygwin") || 0 == strcmp(term, "linux") || 0 == strcmp(term, "rxvt-unicode-256color") || 0 == strcmp(term, "screen") || 0 == strcmp(term, "screen-256color") || 0 == strcmp(term, "screen.xterm-256color") || 0 == strcmp(term, "tmux-256color") || 0 == strcmp(term, "xterm") || 0 == strcmp(term, "xterm-256color") || 0 == strcmp(term, "xterm-termite") || 0 == strcmp(term, "xterm-color"); } else { return false; } #endif }(); static void print_preamble_header(char* out_buff, size_t out_buff_size); // ------------------------------------------------------------------------------ // Colors bool terminal_has_color() { return s_terminal_has_color; } // Colors #ifdef _WIN32 #define VTSEQ(ID) ("\x1b[1;" #ID "m") #else #define VTSEQ(ID) ("\x1b[" 
#ID "m") #endif const char* terminal_black() { return s_terminal_has_color ? VTSEQ(30) : ""; } const char* terminal_red() { return s_terminal_has_color ? VTSEQ(31) : ""; } const char* terminal_green() { return s_terminal_has_color ? VTSEQ(32) : ""; } const char* terminal_yellow() { return s_terminal_has_color ? VTSEQ(33) : ""; } const char* terminal_blue() { return s_terminal_has_color ? VTSEQ(34) : ""; } const char* terminal_purple() { return s_terminal_has_color ? VTSEQ(35) : ""; } const char* terminal_cyan() { return s_terminal_has_color ? VTSEQ(36) : ""; } const char* terminal_light_gray() { return s_terminal_has_color ? VTSEQ(37) : ""; } const char* terminal_white() { return s_terminal_has_color ? VTSEQ(37) : ""; } const char* terminal_light_red() { return s_terminal_has_color ? VTSEQ(91) : ""; } const char* terminal_dim() { return s_terminal_has_color ? VTSEQ(2) : ""; } // Formating const char* terminal_bold() { return s_terminal_has_color ? VTSEQ(1) : ""; } const char* terminal_underline() { return s_terminal_has_color ? VTSEQ(4) : ""; } // You should end each line with this! const char* terminal_reset() { return s_terminal_has_color ? VTSEQ(0) : ""; } // ------------------------------------------------------------------------------ #if LOGURU_WITH_FILEABS void file_reopen(void* user_data); inline FILE* to_file(void* user_data) { return reinterpret_cast(user_data)->fp; } #else inline FILE* to_file(void* user_data) { return reinterpret_cast(user_data); } #endif void file_log(void* user_data, const Message& message) { #if LOGURU_WITH_FILEABS FileAbs* file_abs = reinterpret_cast(user_data); if (file_abs->is_reopening) { return; } // It is better checking file change every minute/hour/day, // instead of doing this every time we log. // Here check_interval is set to zero to enable checking every time; const auto check_interval = seconds(0); if (duration_cast(steady_clock::now() - file_abs->last_check_time) > check_interval) { file_abs->last_check_time = steady_clock::now(); file_reopen(user_data); } FILE* file = to_file(user_data); if (!file) { return; } #else FILE* file = to_file(user_data); #endif fprintf(file, "%s%s%s%s\n", message.preamble, message.indentation, message.prefix, message.message); if (g_flush_interval_ms == 0) { fflush(file); } } void file_close(void* user_data) { FILE* file = to_file(user_data); if (file) { fclose(file); } #if LOGURU_WITH_FILEABS delete reinterpret_cast(user_data); #endif } void file_flush(void* user_data) { FILE* file = to_file(user_data); fflush(file); } #if LOGURU_WITH_FILEABS void file_reopen(void* user_data) { FileAbs* file_abs = reinterpret_cast(user_data); struct stat st; int ret; if (!file_abs->fp || (ret = stat(file_abs->path, &st)) == -1 || (st.st_ino != file_abs->st.st_ino)) { file_abs->is_reopening = true; if (file_abs->fp) { fclose(file_abs->fp); } if (!file_abs->fp) { VLOG_F(g_internal_verbosity, "Reopening file '" LOGURU_FMT(s) "' due to previous error", file_abs->path); } else if (ret < 0) { const auto why = errno_as_text(); VLOG_F(g_internal_verbosity, "Reopening file '" LOGURU_FMT(s) "' due to '" LOGURU_FMT(s) "'", file_abs->path, why.c_str()); } else { VLOG_F(g_internal_verbosity, "Reopening file '" LOGURU_FMT(s) "' due to file changed", file_abs->path); } // try reopen current file. 
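    // Missing parent directories are recreated first; if the fopen below
    // fails, fp stays null and file_log() will simply skip this sink.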
if (!create_directories(file_abs->path)) { LOG_F(ERROR, "Failed to create directories to '" LOGURU_FMT(s) "'", file_abs->path); } file_abs->fp = fopen(file_abs->path, file_abs->mode_str); if (!file_abs->fp) { LOG_F(ERROR, "Failed to open '" LOGURU_FMT(s) "'", file_abs->path); } else { stat(file_abs->path, &file_abs->st); } file_abs->is_reopening = false; } } #endif // ------------------------------------------------------------------------------ // ------------------------------------------------------------------------------ #if LOGURU_SYSLOG void syslog_log(void* /*user_data*/, const Message& message) { /* Level 0: Is reserved for kernel panic type situations. Level 1: Is for Major resource failure. Level 2->7 Application level failures */ int level; if (message.verbosity < Verbosity_FATAL) { level = 1; // System Alert } else { switch (message.verbosity) { case Verbosity_FATAL: level = 2; break; // System Critical case Verbosity_ERROR: level = 3; break; // System Error case Verbosity_WARNING: level = 4; break; // System Warning case Verbosity_INFO: level = 5; break; // System Notice case Verbosity_1: level = 6; break; // System Info default: level = 7; break; // System Debug } } // Note: We don't add the time info. // This is done automatically by the syslog deamon. // Otherwise log all information that the file log does. syslog(level, "%s%s%s", message.indentation, message.prefix, message.message); } void syslog_close(void* /*user_data*/) { closelog(); } void syslog_flush(void* /*user_data*/) {} #endif // ------------------------------------------------------------------------------ // Helpers: Text::~Text() { free(_str); } #if LOGURU_USE_FMTLIB Text vtextprintf(const char* format, fmt::format_args args) { return Text(STRDUP(fmt::vformat(format, args).c_str())); } #else LOGURU_PRINTF_LIKE(1, 0) static Text vtextprintf(const char* format, va_list vlist) { #ifdef _WIN32 int bytes_needed = _vscprintf(format, vlist); CHECK_F(bytes_needed >= 0, "Bad string format: '%s'", format); char* buff = (char*)malloc(bytes_needed + 1); vsnprintf(buff, bytes_needed + 1, format, vlist); return Text(buff); #else char* buff = nullptr; int result = vasprintf(&buff, format, vlist); CHECK_F(result >= 0, "Bad string format: '" LOGURU_FMT(s) "'", format); return Text(buff); #endif } Text textprintf(const char* format, ...) { va_list vlist; va_start(vlist, format); auto result = vtextprintf(format, vlist); va_end(vlist); return result; } #endif // Overloaded for variadic template matching. Text textprintf() { return Text(static_cast(calloc(1, 1))); } static const char* indentation(unsigned depth) { static const char buff[] = ". . . . . . . . . . " ". . . . . . . . . . " ". . . . . . . . . . " ". . . . . . . . . . " ". . . . . . . . . . " ". . . . . . . . . . " ". . . . . . . . . . " ". . . . . . . . . . " ". . . . . . . . . . " ". . . . . . . . . . 
"; static const size_t INDENTATION_WIDTH = 4; static const size_t NUM_INDENTATIONS = (sizeof(buff) - 1) / INDENTATION_WIDTH; depth = std::min(depth, NUM_INDENTATIONS); return buff + INDENTATION_WIDTH * (NUM_INDENTATIONS - depth); } static void parse_args(int& argc, char* argv[], const char* verbosity_flag) { int arg_dest = 1; int out_argc = argc; for (int arg_it = 1; arg_it < argc; ++arg_it) { auto cmd = argv[arg_it]; auto arg_len = strlen(verbosity_flag); if (strncmp(cmd, verbosity_flag, arg_len) == 0 && !std::isalpha(cmd[arg_len], std::locale(""))) { out_argc -= 1; auto value_str = cmd + arg_len; if (value_str[0] == '\0') { // Value in separate argument arg_it += 1; CHECK_LT_F(arg_it, argc, "Missing verbosiy level after " LOGURU_FMT(s) "", verbosity_flag); value_str = argv[arg_it]; out_argc -= 1; } if (*value_str == '=') { value_str += 1; } auto req_verbosity = get_verbosity_from_name(value_str); if (req_verbosity != Verbosity_INVALID) { g_stderr_verbosity = req_verbosity; } else { char* end = 0; g_stderr_verbosity = static_cast(strtol(value_str, &end, 10)); CHECK_F(end && *end == '\0', "Invalid verbosity. Expected integer, INFO, WARNING, ERROR or " "OFF, got '" LOGURU_FMT(s) "'", value_str); } } else { argv[arg_dest++] = argv[arg_it]; } } argc = out_argc; argv[argc] = nullptr; } static long long now_ns() { return duration_cast( high_resolution_clock::now().time_since_epoch()) .count(); } // Returns the part of the path after the last / or \ (if any). const char* filename(const char* path) { for (auto ptr = path; *ptr; ++ptr) { if (*ptr == '/' || *ptr == '\\') { path = ptr + 1; } } return path; } // ------------------------------------------------------------------------------ static void on_atexit() { VLOG_F(g_internal_verbosity, "atexit"); flush(); } static void install_signal_handlers(const SignalOptions& signal_options); static void write_hex_digit(std::string& out, unsigned num) { DCHECK_LT_F(num, 16u); if (num < 10u) { out.push_back(char('0' + num)); } else { out.push_back(char('A' + num - 10)); } } static void write_hex_byte(std::string& out, uint8_t n) { write_hex_digit(out, n >> 4u); write_hex_digit(out, n & 0x0f); } static void escape(std::string& out, const std::string& str) { for (char c : str) { /**/ if (c == '\a') { out += "\\a"; } else if (c == '\b') { out += "\\b"; } else if (c == '\f') { out += "\\f"; } else if (c == '\n') { out += "\\n"; } else if (c == '\r') { out += "\\r"; } else if (c == '\t') { out += "\\t"; } else if (c == '\v') { out += "\\v"; } else if (c == '\\') { out += "\\\\"; } else if (c == '\'') { out += "\\\'"; } else if (c == '\"') { out += "\\\""; } else if (c == ' ') { out += "\\ "; } else if (0 <= c && c < 0x20) { // ASCI control character: // else if (c < 0x20 || c != (c & 127)) { // ASCII control character or // UTF-8: out += "\\x"; write_hex_byte(out, static_cast(c)); } else { out += c; } } } Text errno_as_text() { char buff[256]; #if defined(__GLIBC__) && defined(_GNU_SOURCE) // GNU Version return Text(STRDUP(strerror_r(errno, buff, sizeof(buff)))); #elif defined(__APPLE__) || _POSIX_C_SOURCE >= 200112L // XSI Version strerror_r(errno, buff, sizeof(buff)); return Text(strdup(buff)); #elif defined(_WIN32) strerror_s(buff, sizeof(buff), errno); return Text(STRDUP(buff)); #else // Not thread-safe. 
return Text(STRDUP(strerror(errno))); #endif } void init(int& argc, char* argv[], const Options& options) { CHECK_GT_F(argc, 0, "Expected proper argc/argv"); CHECK_EQ_F(argv[argc], nullptr, "Expected proper argc/argv"); s_argv0_filename = filename(argv[0]); #ifdef _WIN32 #define getcwd _getcwd #endif if (!getcwd(s_current_dir, sizeof(s_current_dir))) { const auto error_text = errno_as_text(); LOG_F(WARNING, "Failed to get current working directory: " LOGURU_FMT(s) "", error_text.c_str()); } s_arguments = ""; for (int i = 0; i < argc; ++i) { escape(s_arguments, argv[i]); if (i + 1 < argc) { s_arguments += " "; } } if (options.verbosity_flag) { parse_args(argc, argv, options.verbosity_flag); } if (const auto main_thread_name = options.main_thread_name) { #if LOGURU_PTLS_NAMES || LOGURU_WINTHREADS set_thread_name(main_thread_name); #elif LOGURU_PTHREADS char old_thread_name[16] = {0}; auto this_thread = pthread_self(); #if defined(__APPLE__) || defined(__linux__) || defined(__sun) pthread_getname_np(this_thread, old_thread_name, sizeof(old_thread_name)); #endif if (old_thread_name[0] == 0) { #ifdef __APPLE__ pthread_setname_np(main_thread_name); #elif defined(__FreeBSD__) || defined(__OpenBSD__) pthread_set_name_np(this_thread, main_thread_name); #elif defined(__linux__) || defined(__sun) pthread_setname_np(this_thread, main_thread_name); #endif } #endif // LOGURU_PTHREADS } if (g_stderr_verbosity >= Verbosity_INFO) { if (g_preamble_header) { char preamble_explain[LOGURU_PREAMBLE_WIDTH]; print_preamble_header(preamble_explain, sizeof(preamble_explain)); if (g_colorlogtostderr && s_terminal_has_color) { fprintf(stderr, "%s%s%s\n", terminal_reset(), terminal_dim(), preamble_explain); } else { fprintf(stderr, "%s\n", preamble_explain); } } fflush(stderr); } VLOG_F(g_internal_verbosity, "arguments: " LOGURU_FMT(s) "", s_arguments.c_str()); if (strlen(s_current_dir) != 0) { VLOG_F(g_internal_verbosity, "Current dir: " LOGURU_FMT(s) "", s_current_dir); } VLOG_F(g_internal_verbosity, "stderr verbosity: " LOGURU_FMT(d) "", g_stderr_verbosity); VLOG_F(g_internal_verbosity, "-----------------------------------"); install_signal_handlers(options.signals); atexit(on_atexit); } void shutdown() { VLOG_F(g_internal_verbosity, "loguru::shutdown()"); remove_all_callbacks(); set_fatal_handler(nullptr); set_verbosity_to_name_callback(nullptr); set_name_to_verbosity_callback(nullptr); } void write_date_time(char* buff, size_t buff_size) { auto now = system_clock::now(); long long ms_since_epoch = duration_cast(now.time_since_epoch()).count(); time_t sec_since_epoch = time_t(ms_since_epoch / 1000); tm time_info; localtime_r(&sec_since_epoch, &time_info); snprintf(buff, buff_size, "%04d%02d%02d_%02d%02d%02d.%03lld", 1900 + time_info.tm_year, 1 + time_info.tm_mon, time_info.tm_mday, time_info.tm_hour, time_info.tm_min, time_info.tm_sec, ms_since_epoch % 1000); } const char* argv0_filename() { return s_argv0_filename.c_str(); } const char* arguments() { return s_arguments.c_str(); } const char* current_dir() { return s_current_dir; } const char* home_dir() { #ifdef _WIN32 char* user_profile; size_t len; errno_t err = _dupenv_s(&user_profile, &len, "USERPROFILE"); CHECK_F(err != 0, "Missing USERPROFILE"); return user_profile; #else // _WIN32 auto home = getenv("HOME"); CHECK_F(home != nullptr, "Missing HOME"); return home; #endif // _WIN32 } void suggest_log_path(const char* prefix, char* buff, unsigned buff_size) { if (prefix[0] == '~') { snprintf(buff, buff_size - 1, "%s%s", home_dir(), prefix + 1); } else { 
snprintf(buff, buff_size - 1, "%s", prefix); } // Check for terminating / size_t n = strlen(buff); if (n != 0) { if (buff[n - 1] != '/') { CHECK_F(n + 2 < buff_size, "Filename buffer too small"); buff[n] = '/'; buff[n + 1] = '\0'; } } #ifdef _WIN32 strncat_s(buff, buff_size - strlen(buff) - 1, s_argv0_filename.c_str(), buff_size - strlen(buff) - 1); strncat_s(buff, buff_size - strlen(buff) - 1, "/", buff_size - strlen(buff) - 1); write_date_time(buff + strlen(buff), buff_size - strlen(buff)); strncat_s(buff, buff_size - strlen(buff) - 1, ".log", buff_size - strlen(buff) - 1); #else strncat(buff, s_argv0_filename.c_str(), buff_size - strlen(buff) - 1); strncat(buff, "/", buff_size - strlen(buff) - 1); write_date_time(buff + strlen(buff), buff_size - strlen(buff)); strncat(buff, ".log", buff_size - strlen(buff) - 1); #endif } bool create_directories(const char* file_path_const) { CHECK_F(file_path_const && *file_path_const); char* file_path = STRDUP(file_path_const); for (char* p = strchr(file_path + 1, '/'); p; p = strchr(p + 1, '/')) { *p = '\0'; #ifdef _WIN32 if (_mkdir(file_path) == -1) { #else if (mkdir(file_path, 0755) == -1) { #endif if (errno != EEXIST) { LOG_F(ERROR, "Failed to create directory '" LOGURU_FMT(s) "'", file_path); LOG_IF_F(ERROR, errno == EACCES, "EACCES"); LOG_IF_F(ERROR, errno == ENAMETOOLONG, "ENAMETOOLONG"); LOG_IF_F(ERROR, errno == ENOENT, "ENOENT"); LOG_IF_F(ERROR, errno == ENOTDIR, "ENOTDIR"); LOG_IF_F(ERROR, errno == ELOOP, "ELOOP"); *p = '/'; free(file_path); return false; } } *p = '/'; } free(file_path); return true; } // namespace loguru bool add_file(const char* path_in, FileMode mode, Verbosity verbosity) { char path[PATH_MAX]; if (path_in[0] == '~') { snprintf(path, sizeof(path) - 1, "%s%s", home_dir(), path_in + 1); } else { snprintf(path, sizeof(path) - 1, "%s", path_in); } if (!create_directories(path)) { LOG_F(ERROR, "Failed to create directories to '" LOGURU_FMT(s) "'", path); } const char* mode_str = (mode == FileMode::Truncate ? "w" : "a"); FILE* file; #ifdef _WIN32 errno_t file_error = fopen_s(&file, path, mode_str); if (file_error) { #else file = fopen(path, mode_str); if (!file) { #endif LOG_F(ERROR, "Failed to open '" LOGURU_FMT(s) "'", path); return false; } #if LOGURU_WITH_FILEABS FileAbs* file_abs = new FileAbs(); // this is deleted in file_close; snprintf(file_abs->path, sizeof(file_abs->path) - 1, "%s", path); snprintf(file_abs->mode_str, sizeof(file_abs->mode_str) - 1, "%s", mode_str); stat(file_abs->path, &file_abs->st); file_abs->fp = file; file_abs->verbosity = verbosity; add_callback(path_in, file_log, file_abs, verbosity, file_close, file_flush); #else add_callback(path_in, file_log, file, verbosity, file_close, file_flush); #endif if (mode == FileMode::Append) { fprintf(file, "\n\n\n\n\n"); } if (!s_arguments.empty()) { fprintf(file, "arguments: %s\n", s_arguments.c_str()); } if (strlen(s_current_dir) != 0) { fprintf(file, "Current dir: %s\n", s_current_dir); } fprintf(file, "File verbosity level: %d\n", verbosity); if (g_preamble_header) { char preamble_explain[LOGURU_PREAMBLE_WIDTH]; print_preamble_header(preamble_explain, sizeof(preamble_explain)); fprintf(file, "%s\n", preamble_explain); } fflush(file); VLOG_F(g_internal_verbosity, "Logging to '" LOGURU_FMT(s) "', mode: '" LOGURU_FMT( s) "', verbosity: " LOGURU_FMT(d) "", path, mode_str, verbosity); return true; } /* Will add syslog as a standard sink for log messages Any logging message with a verbosity lower or equal to the given verbosity will be included. 
This works for Unix like systems (i.e. Linux/Mac) There is no current implementation for Windows (as I don't know the equivalent calls or have a way to test them). If you know please add and send a pull request. The code should still compile under windows but will only generate a warning message that syslog is unavailable. Search for LOGURU_SYSLOG to find and fix. */ bool add_syslog(const char* app_name, Verbosity verbosity) { return add_syslog(app_name, verbosity, LOG_USER); } bool add_syslog(const char* app_name, Verbosity verbosity, int facility) { #if LOGURU_SYSLOG if (app_name == nullptr) { app_name = argv0_filename(); } openlog(app_name, 0, facility); add_callback("'syslog'", syslog_log, nullptr, verbosity, syslog_close, syslog_flush); VLOG_F(g_internal_verbosity, "Logging to 'syslog' , verbosity: " LOGURU_FMT(d) "", verbosity); return true; #else (void)app_name; (void)verbosity; (void)facility; VLOG_F(g_internal_verbosity, "syslog not implemented on this system. Request " "to install syslog logging ignored."); return false; #endif } // Will be called right before abort(). void set_fatal_handler(fatal_handler_t handler) { s_fatal_handler = handler; } fatal_handler_t get_fatal_handler() { return s_fatal_handler; } void set_verbosity_to_name_callback(verbosity_to_name_t callback) { s_verbosity_to_name_callback = callback; } void set_name_to_verbosity_callback(name_to_verbosity_t callback) { s_name_to_verbosity_callback = callback; } void add_stack_cleanup(const char* find_this, const char* replace_with_this) { if (strlen(find_this) <= strlen(replace_with_this)) { LOG_F(WARNING, "add_stack_cleanup: the replacement should be shorter than " "the pattern!"); return; } s_user_stack_cleanups.push_back(StringPair(find_this, replace_with_this)); } static void on_callback_change() { s_max_out_verbosity = Verbosity_OFF; for (const auto& callback : s_callbacks) { s_max_out_verbosity = std::max(s_max_out_verbosity, callback.verbosity); } } void add_callback(const char* id, log_handler_t callback, void* user_data, Verbosity verbosity, close_handler_t on_close, flush_handler_t on_flush) { std::lock_guard lock(s_mutex); s_callbacks.push_back( Callback{id, callback, user_data, verbosity, on_close, on_flush, 0}); on_callback_change(); } // Returns a custom verbosity name if one is available, or nullptr. // See also set_verbosity_to_name_callback. const char* get_verbosity_name(Verbosity verbosity) { auto name = s_verbosity_to_name_callback ? (*s_verbosity_to_name_callback)(verbosity) : nullptr; // Use standard replacements if callback fails: if (!name) { if (verbosity <= Verbosity_FATAL) { name = "FATL"; } else if (verbosity == Verbosity_ERROR) { name = "ERR"; } else if (verbosity == Verbosity_WARNING) { name = "WARN"; } else if (verbosity == Verbosity_INFO) { name = "INFO"; } } return name; } // Returns Verbosity_INVALID if the name is not found. // See also set_name_to_verbosity_callback. Verbosity get_verbosity_from_name(const char* name) { auto verbosity = s_name_to_verbosity_callback ? 
(*s_name_to_verbosity_callback)(name) : Verbosity_INVALID; // Use standard replacements if callback fails: if (verbosity == Verbosity_INVALID) { if (strcmp(name, "OFF") == 0) { verbosity = Verbosity_OFF; } else if (strcmp(name, "INFO") == 0) { verbosity = Verbosity_INFO; } else if (strcmp(name, "WARNING") == 0) { verbosity = Verbosity_WARNING; } else if (strcmp(name, "ERROR") == 0) { verbosity = Verbosity_ERROR; } else if (strcmp(name, "FATAL") == 0) { verbosity = Verbosity_FATAL; } } return verbosity; } bool remove_callback(const char* id) { std::lock_guard lock(s_mutex); auto it = std::find_if(begin(s_callbacks), end(s_callbacks), [&](const Callback& c) { return c.id == id; }); if (it != s_callbacks.end()) { if (it->close) { it->close(it->user_data); } s_callbacks.erase(it); on_callback_change(); return true; } else { LOG_F(ERROR, "Failed to locate callback with id '" LOGURU_FMT(s) "'", id); return false; } } void remove_all_callbacks() { std::lock_guard lock(s_mutex); for (auto& callback : s_callbacks) { if (callback.close) { callback.close(callback.user_data); } } s_callbacks.clear(); on_callback_change(); } // Returns the maximum of g_stderr_verbosity and all file/custom outputs. Verbosity current_verbosity_cutoff() { return g_stderr_verbosity > s_max_out_verbosity ? g_stderr_verbosity : s_max_out_verbosity; } // ------------------------------------------------------------------------ // Threads names #if LOGURU_PTLS_NAMES static pthread_once_t s_pthread_key_once = PTHREAD_ONCE_INIT; static pthread_key_t s_pthread_key_name; void make_pthread_key_name() { (void)pthread_key_create(&s_pthread_key_name, free); } #endif #if LOGURU_WINTHREADS // Where we store the custom thread name set by `set_thread_name` char* thread_name_buffer() { __declspec(thread) static char thread_name[LOGURU_THREADNAME_WIDTH + 1] = {0}; return &thread_name[0]; } #endif // LOGURU_WINTHREADS void set_thread_name(const char* name) { #if LOGURU_PTLS_NAMES // Store thread name in thread-local storage at `s_pthread_key_name` (void)pthread_once(&s_pthread_key_once, make_pthread_key_name); (void)pthread_setspecific(s_pthread_key_name, STRDUP(name)); #elif LOGURU_PTHREADS // Tell the OS the thread name #ifdef __APPLE__ pthread_setname_np(name); #elif defined(__FreeBSD__) || defined(__OpenBSD__) pthread_set_name_np(pthread_self(), name); #elif defined(__linux__) || defined(__sun) pthread_setname_np(pthread_self(), name); #endif #elif LOGURU_WINTHREADS // Store thread name in a thread-local storage: strncpy_s(thread_name_buffer(), LOGURU_THREADNAME_WIDTH + 1, name, _TRUNCATE); #else // LOGURU_PTHREADS // TODO: on these weird platforms we should also store the thread name // in a generic thread-local storage. (void)name; #endif // LOGURU_PTHREADS } void get_thread_name(char* buffer, unsigned long long length, bool right_align_hex_id) { CHECK_NE_F(length, 0u, "Zero length buffer in get_thread_name"); CHECK_NOTNULL_F(buffer, "nullptr in get_thread_name"); #if LOGURU_PTLS_NAMES (void)pthread_once(&s_pthread_key_once, make_pthread_key_name); if (const char* name = static_cast(pthread_getspecific(s_pthread_key_name))) { snprintf(buffer, length, "%s", name); } else { buffer[0] = 0; } #elif LOGURU_PTHREADS // Ask the OS about the thread name. // This is what we *want* to do on all platforms, but // only some platforms support it (currently). 
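// If the query below comes back empty (or the platform offers no way to read
// a thread name), the fallback further down writes a hex thread id into
// `buffer` instead, so callers always get something printable.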
pthread_getname_np(pthread_self(), buffer, length); #elif LOGURU_WINTHREADS snprintf(buffer, (size_t)length, "%s", thread_name_buffer()); #else // Thread names unsupported buffer[0] = 0; #endif if (buffer[0] == 0) { // We failed to get a readable thread name. // Write a HEX thread ID instead. // We try to get an ID that is the same as the ID you could // read in your debugger, system monitor etc. #ifdef __APPLE__ uint64_t thread_id; pthread_threadid_np(pthread_self(), &thread_id); #elif defined(__FreeBSD__) long thread_id; (void)thr_self(&thread_id); #elif LOGURU_PTHREADS uint64_t thread_id = pthread_self(); #else // This ID does not correllate to anything we can get from the OS, // so this is the worst way to get the ID. const auto thread_id = std::hash{}(std::this_thread::get_id()); #endif if (right_align_hex_id) { snprintf(buffer, length, "%*X", static_cast(length - 1), static_cast(thread_id)); } else { snprintf(buffer, length, "%X", static_cast(thread_id)); } } } // ------------------------------------------------------------------------ // Stack traces #if LOGURU_STACKTRACES Text demangle(const char* name) { int status = -1; char* demangled = abi::__cxa_demangle(name, 0, 0, &status); Text result{status == 0 ? demangled : STRDUP(name)}; return result; } #if LOGURU_RTTI template std::string type_name() { auto demangled = demangle(typeid(T).name()); return demangled.c_str(); } #endif // LOGURU_RTTI static const StringPairList REPLACE_LIST = { #if LOGURU_RTTI {type_name(), "std::string"}, {type_name(), "std::wstring"}, {type_name(), "std::u16string"}, {type_name(), "std::u32string"}, #endif // LOGURU_RTTI {"std::__1::", "std::"}, {"__thiscall ", ""}, {"__cdecl ", ""}, }; void do_replacements(const StringPairList& replacements, std::string& str) { for (auto&& p : replacements) { if (p.first.size() <= p.second.size()) { // On gcc, "type_name()" is "std::string" continue; } size_t it; while ((it = str.find(p.first)) != std::string::npos) { str.replace(it, p.first.size(), p.second); } } } std::string prettify_stacktrace(const std::string& input) { std::string output = input; do_replacements(s_user_stack_cleanups, output); do_replacements(REPLACE_LIST, output); try { std::regex std_allocator_re(R"(,\s*std::allocator<[^<>]+>)"); output = std::regex_replace(output, std_allocator_re, std::string("")); std::regex template_spaces_re(R"(<\s*([^<> ]+)\s*>)"); output = std::regex_replace(output, template_spaces_re, std::string("<$1>")); } catch (std::regex_error&) { // Probably old GCC. } return output; } std::string stacktrace_as_stdstring(int skip) { // From https://gist.github.com/fmela/591333 void* callstack[128]; const auto max_frames = sizeof(callstack) / sizeof(callstack[0]); int num_frames = backtrace(callstack, max_frames); char** symbols = backtrace_symbols(callstack, num_frames); std::string result; // Print stack traces so the most relevant ones are written last // Rationale: // http://yellerapp.com/posts/2015-01-22-upside-down-stacktraces.html for (int i = num_frames - 1; i >= skip; --i) { char buf[1024]; Dl_info info; if (dladdr(callstack[i], &info) && info.dli_sname) { char* demangled = NULL; int status = -1; if (info.dli_sname[0] == '_') { demangled = abi::__cxa_demangle(info.dli_sname, 0, 0, &status); } snprintf(buf, sizeof(buf), "%-3d %*p %s + %zd\n", i - skip, int(2 + sizeof(void*) * 2), callstack[i], status == 0 ? demangled : info.dli_sname == 0 ? 
symbols[i] : info.dli_sname, static_cast(callstack[i]) - static_cast(info.dli_saddr)); free(demangled); } else { snprintf(buf, sizeof(buf), "%-3d %*p %s\n", i - skip, int(2 + sizeof(void*) * 2), callstack[i], symbols[i]); } result += buf; } free(symbols); if (num_frames == max_frames) { result = "[truncated]\n" + result; } if (!result.empty() && result[result.size() - 1] == '\n') { result.resize(result.size() - 1); } return prettify_stacktrace(result); } #else // LOGURU_STACKTRACES Text demangle(const char* name) { return Text(STRDUP(name)); } std::string stacktrace_as_stdstring(int) { // No stacktraces available on this platform" return ""; } #endif // LOGURU_STACKTRACES Text stacktrace(int skip) { auto str = stacktrace_as_stdstring(skip + 1); return Text(STRDUP(str.c_str())); } // ------------------------------------------------------------------------ static void print_preamble_header(char* out_buff, size_t out_buff_size) { if (out_buff_size == 0) { return; } out_buff[0] = '\0'; long pos = 0; if (g_preamble_date && pos < out_buff_size) { pos += snprintf(out_buff + pos, out_buff_size - pos, "date "); } if (g_preamble_time && pos < out_buff_size) { pos += snprintf(out_buff + pos, out_buff_size - pos, "time "); } if (g_preamble_uptime && pos < out_buff_size) { pos += snprintf(out_buff + pos, out_buff_size - pos, "( uptime ) "); } if (g_preamble_thread && pos < out_buff_size) { pos += snprintf(out_buff + pos, out_buff_size - pos, "[%-*s]", LOGURU_THREADNAME_WIDTH, " thread name/id"); } if (g_preamble_file && pos < out_buff_size) { pos += snprintf(out_buff + pos, out_buff_size - pos, "%*s:line ", LOGURU_FILENAME_WIDTH, "file"); } if (g_preamble_verbose && pos < out_buff_size) { pos += snprintf(out_buff + pos, out_buff_size - pos, " v"); } if (g_preamble_pipe && pos < out_buff_size) { pos += snprintf(out_buff + pos, out_buff_size - pos, "| "); } } static void print_preamble(char* out_buff, size_t out_buff_size, Verbosity verbosity, const char* file, unsigned line) { if (out_buff_size == 0) { return; } out_buff[0] = '\0'; if (!g_preamble) { return; } long long ms_since_epoch = duration_cast(system_clock::now().time_since_epoch()) .count(); time_t sec_since_epoch = time_t(ms_since_epoch / 1000); tm time_info; localtime_r(&sec_since_epoch, &time_info); auto uptime_ms = duration_cast(steady_clock::now() - s_start_time).count(); auto uptime_sec = static_cast(uptime_ms) / 1000.0; char thread_name[LOGURU_THREADNAME_WIDTH + 1] = {0}; get_thread_name(thread_name, LOGURU_THREADNAME_WIDTH + 1, true); if (s_strip_file_path) { file = filename(file); } char level_buff[6]; const char* custom_level_name = get_verbosity_name(verbosity); if (custom_level_name) { snprintf(level_buff, sizeof(level_buff) - 1, "%s", custom_level_name); } else { snprintf(level_buff, sizeof(level_buff) - 1, "% 4d", verbosity); } long pos = 0; if (g_preamble_date && pos < out_buff_size) { pos += snprintf(out_buff + pos, out_buff_size - pos, "%04d-%02d-%02d ", 1900 + time_info.tm_year, 1 + time_info.tm_mon, time_info.tm_mday); } if (g_preamble_time && pos < out_buff_size) { pos += snprintf(out_buff + pos, out_buff_size - pos, "%02d:%02d:%02d.%03lld ", time_info.tm_hour, time_info.tm_min, time_info.tm_sec, ms_since_epoch % 1000); } if (g_preamble_uptime && pos < out_buff_size) { pos += snprintf(out_buff + pos, out_buff_size - pos, "(%8.3fs) ", uptime_sec); } if (g_preamble_thread && pos < out_buff_size) { pos += snprintf(out_buff + pos, out_buff_size - pos, "[%-*s]", LOGURU_THREADNAME_WIDTH, thread_name); } if (g_preamble_file && pos < 
out_buff_size) { char shortened_filename[LOGURU_FILENAME_WIDTH + 1]; snprintf(shortened_filename, LOGURU_FILENAME_WIDTH + 1, "%s", file); pos += snprintf(out_buff + pos, out_buff_size - pos, "%*s:%-5u ", LOGURU_FILENAME_WIDTH, shortened_filename, line); } if (g_preamble_verbose && pos < out_buff_size) { pos += snprintf(out_buff + pos, out_buff_size - pos, "%4s", level_buff); } if (g_preamble_pipe && pos < out_buff_size) { pos += snprintf(out_buff + pos, out_buff_size - pos, "| "); } } // stack_trace_skip is just if verbosity == FATAL. static void log_message(int stack_trace_skip, Message& message, bool with_indentation, bool abort_if_fatal) { const auto verbosity = message.verbosity; std::lock_guard lock(s_mutex); if (message.verbosity == Verbosity_FATAL) { auto st = loguru::stacktrace(stack_trace_skip + 2); if (!st.empty()) { RAW_LOG_F(ERROR, "Stack trace:\n" LOGURU_FMT(s) "", st.c_str()); } auto ec = loguru::get_error_context(); if (!ec.empty()) { RAW_LOG_F(ERROR, "" LOGURU_FMT(s) "", ec.c_str()); } } if (with_indentation) { message.indentation = indentation(s_stderr_indentation); } if (verbosity <= g_stderr_verbosity) { if (g_colorlogtostderr && s_terminal_has_color) { if (verbosity > Verbosity_WARNING) { fprintf(stderr, "%s%s%s%s%s%s%s%s\n", terminal_reset(), terminal_dim(), message.preamble, message.indentation, verbosity == Verbosity_INFO ? terminal_reset() : "", // un-dim for info message.prefix, message.message, terminal_reset()); } else { fprintf(stderr, "%s%s%s%s%s%s%s\n", terminal_reset(), verbosity == Verbosity_WARNING ? terminal_yellow() : terminal_red(), message.preamble, message.indentation, message.prefix, message.message, terminal_reset()); } } else { fprintf(stderr, "%s%s%s%s\n", message.preamble, message.indentation, message.prefix, message.message); } if (g_flush_interval_ms == 0) { fflush(stderr); } else { s_needs_flushing = true; } } for (auto& p : s_callbacks) { if (verbosity <= p.verbosity) { if (with_indentation) { message.indentation = indentation(p.indentation); } p.callback(p.user_data, message); if (g_flush_interval_ms == 0) { if (p.flush) { p.flush(p.user_data); } } else { s_needs_flushing = true; } } } if (g_flush_interval_ms > 0 && !s_flush_thread) { s_flush_thread = new std::thread([]() { for (;;) { if (s_needs_flushing) { flush(); } std::this_thread::sleep_for( std::chrono::milliseconds(g_flush_interval_ms)); } }); } if (message.verbosity == Verbosity_FATAL) { flush(); if (s_fatal_handler) { s_fatal_handler(message); flush(); } if (abort_if_fatal) { #if !defined(_WIN32) if (s_signal_options.sigabrt) { // Make sure we don't catch our own abort: signal(SIGABRT, SIG_DFL); } #endif abort(); } } } // stack_trace_skip is just if verbosity == FATAL. 
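// log_to_everywhere() assembles the preamble via print_preamble(), wraps the
// pieces in a Message and forwards it to log_message() above. The
// stack_trace_skip argument only matters for FATAL messages, where a stack
// trace is captured before aborting.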
void log_to_everywhere(int stack_trace_skip, Verbosity verbosity, const char* file, unsigned line, const char* prefix, const char* buff) { char preamble_buff[LOGURU_PREAMBLE_WIDTH]; print_preamble(preamble_buff, sizeof(preamble_buff), verbosity, file, line); auto message = Message{verbosity, file, line, preamble_buff, "", prefix, buff}; log_message(stack_trace_skip + 1, message, true, true); } #if LOGURU_USE_FMTLIB void vlog(Verbosity verbosity, const char* file, unsigned line, const char* format, fmt::format_args args) { auto formatted = fmt::vformat(format, args); log_to_everywhere(1, verbosity, file, line, "", formatted.c_str()); } void raw_vlog(Verbosity verbosity, const char* file, unsigned line, const char* format, fmt::format_args args) { auto formatted = fmt::vformat(format, args); auto message = Message{verbosity, file, line, "", "", "", formatted.c_str()}; log_message(1, message, false, true); } #else void log(Verbosity verbosity, const char* file, unsigned line, const char* format, ...) { va_list vlist; va_start(vlist, format); auto buff = vtextprintf(format, vlist); log_to_everywhere(1, verbosity, file, line, "", buff.c_str()); va_end(vlist); } void raw_log(Verbosity verbosity, const char* file, unsigned line, const char* format, ...) { va_list vlist; va_start(vlist, format); auto buff = vtextprintf(format, vlist); auto message = Message{verbosity, file, line, "", "", "", buff.c_str()}; log_message(1, message, false, true); va_end(vlist); } #endif void flush() { std::lock_guard lock(s_mutex); fflush(stderr); for (const auto& callback : s_callbacks) { if (callback.flush) { callback.flush(callback.user_data); } } s_needs_flushing = false; } LogScopeRAII::LogScopeRAII(Verbosity verbosity, const char* file, unsigned line, const char* format, ...) : _verbosity(verbosity), _file(file), _line(line) { if (verbosity <= current_verbosity_cutoff()) { std::lock_guard lock(s_mutex); _indent_stderr = (verbosity <= g_stderr_verbosity); _start_time_ns = now_ns(); va_list vlist; va_start(vlist, format); vsnprintf(_name, sizeof(_name), format, vlist); log_to_everywhere(1, _verbosity, file, line, "{ ", _name); va_end(vlist); if (_indent_stderr) { ++s_stderr_indentation; } for (auto& p : s_callbacks) { if (verbosity <= p.verbosity) { ++p.indentation; } } } else { _file = nullptr; } } LogScopeRAII::~LogScopeRAII() { if (_file) { std::lock_guard lock(s_mutex); if (_indent_stderr && s_stderr_indentation > 0) { --s_stderr_indentation; } for (auto& p : s_callbacks) { // Note: Callback indentation cannot change! if (_verbosity <= p.verbosity) { // in unlikely case this callback is new if (p.indentation > 0) { --p.indentation; } } } #if LOGURU_VERBOSE_SCOPE_ENDINGS auto duration_sec = static_cast(now_ns() - _start_time_ns) / 1e9; #if LOGURU_USE_FMTLIB auto buff = textprintf("{:.{}f} s: {:s}", duration_sec, LOGURU_SCOPE_TIME_PRECISION, _name); #else auto buff = textprintf("%.*f s: %s", LOGURU_SCOPE_TIME_PRECISION, duration_sec, _name); #endif log_to_everywhere(1, _verbosity, _file, _line, "} ", buff.c_str()); #else log_to_everywhere(1, _verbosity, _file, _line, "}", ""); #endif } } #if LOGURU_USE_FMTLIB void vlog_and_abort(int stack_trace_skip, const char* expr, const char* file, unsigned line, const char* format, fmt::format_args args) { auto formatted = fmt::vformat(format, args); log_to_everywhere(stack_trace_skip + 1, Verbosity_FATAL, file, line, expr, formatted.c_str()); abort(); // log_to_everywhere already does this, but this makes the analyzer // happy. 
} #else void log_and_abort(int stack_trace_skip, const char* expr, const char* file, unsigned line, const char* format, ...) { va_list vlist; va_start(vlist, format); auto buff = vtextprintf(format, vlist); log_to_everywhere(stack_trace_skip + 1, Verbosity_FATAL, file, line, expr, buff.c_str()); va_end(vlist); abort(); // log_to_everywhere already does this, but this makes the analyzer // happy. } #endif void log_and_abort(int stack_trace_skip, const char* expr, const char* file, unsigned line) { log_and_abort(stack_trace_skip + 1, expr, file, line, " "); } // ---------------------------------------------------------------------------- // Streams: #if LOGURU_USE_FMTLIB template std::string vstrprintf(const char* format, const Args&... args) { auto text = textprintf(format, args...); std::string result = text.c_str(); return result; } template std::string strprintf(const char* format, const Args&... args) { return vstrprintf(format, args...); } #else std::string vstrprintf(const char* format, va_list vlist) { auto text = vtextprintf(format, vlist); std::string result = text.c_str(); return result; } std::string strprintf(const char* format, ...) { va_list vlist; va_start(vlist, format); auto result = vstrprintf(format, vlist); va_end(vlist); return result; } #endif #if LOGURU_WITH_STREAMS StreamLogger::~StreamLogger() noexcept(false) { auto message = _ss.str(); log(_verbosity, _file, _line, LOGURU_FMT(s), message.c_str()); } AbortLogger::~AbortLogger() noexcept(false) { auto message = _ss.str(); loguru::log_and_abort(1, _expr, _file, _line, LOGURU_FMT(s), message.c_str()); } #endif // LOGURU_WITH_STREAMS // ---------------------------------------------------------------------------- // 888888 88""Yb 88""Yb dP"Yb 88""Yb dP""b8 dP"Yb 88b 88 888888 888888 // Yb dP 888888 88__ 88__dP 88__dP dP Yb 88__dP dP `" dP Yb 88Yb88 // 88 88__ YbdP 88 88"" 88"Yb 88"Yb Yb dP 88"Yb Yb Yb dP // 88 Y88 88 88"" dPYb 88 888888 88 Yb 88 Yb YbodP 88 Yb YboodP // YbodP 88 Y8 88 888888 dP Yb 88 // ---------------------------------------------------------------------------- struct StringStream { std::string str; }; // Use this in your EcPrinter implementations. 
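// Implementation note: stream_print() below simply appends `text` to the
// wrapped std::string, so a print_value() override may call it repeatedly.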
void stream_print(StringStream& out_string_stream, const char* text)
{
  out_string_stream.str += text;
}

// ----------------------------------------------------------------------------

using ECPtr = EcEntryBase*;

#if defined(_WIN32) || (defined(__APPLE__) && !TARGET_OS_IPHONE)
#ifdef __APPLE__
#define LOGURU_THREAD_LOCAL __thread
#else
#define LOGURU_THREAD_LOCAL thread_local
#endif
static LOGURU_THREAD_LOCAL ECPtr thread_ec_ptr = nullptr;

ECPtr& get_thread_ec_head_ref() { return thread_ec_ptr; }
#else // !thread_local
static pthread_once_t s_ec_pthread_once = PTHREAD_ONCE_INIT;
static pthread_key_t s_ec_pthread_key;

void free_ec_head_ref(void* io_error_context)
{
  delete reinterpret_cast<ECPtr*>(io_error_context);
}

void ec_make_pthread_key()
{
  (void)pthread_key_create(&s_ec_pthread_key, free_ec_head_ref);
}

ECPtr& get_thread_ec_head_ref()
{
  (void)pthread_once(&s_ec_pthread_once, ec_make_pthread_key);
  auto ec = reinterpret_cast<ECPtr*>(pthread_getspecific(s_ec_pthread_key));
  if (ec == nullptr)
  {
    ec = new ECPtr(nullptr);
    (void)pthread_setspecific(s_ec_pthread_key, ec);
  }
  return *ec;
}
#endif // !thread_local

// ----------------------------------------------------------------------------

EcHandle get_thread_ec_handle() { return get_thread_ec_head_ref(); }

Text get_error_context()
{
  return get_error_context_for(get_thread_ec_head_ref());
}

Text get_error_context_for(const EcEntryBase* ec_head)
{
  std::vector<const EcEntryBase*> stack;
  while (ec_head)
  {
    stack.push_back(ec_head);
    ec_head = ec_head->_previous;
  }
  std::reverse(stack.begin(), stack.end());

  StringStream result;
  if (!stack.empty())
  {
    result.str += "------------------------------------------------\n";
    for (auto entry : stack)
    {
      const auto description = std::string(entry->_descr) + ":";
#if LOGURU_USE_FMTLIB
      auto prefix = textprintf("[ErrorContext] {.{}s}:{:-5u} {:-20s} ",
                               filename(entry->_file), LOGURU_FILENAME_WIDTH,
                               entry->_line, description.c_str());
#else
      auto prefix = textprintf("[ErrorContext] %*s:%-5u %-20s ",
                               LOGURU_FILENAME_WIDTH, filename(entry->_file),
                               entry->_line, description.c_str());
#endif
      result.str += prefix.c_str();
      entry->print_value(result);
      result.str += "\n";
    }
    result.str += "------------------------------------------------";
  }
  return Text(STRDUP(result.str.c_str()));
}

EcEntryBase::EcEntryBase(const char* file, unsigned line, const char* descr)
    : _file(file), _line(line), _descr(descr)
{
  EcEntryBase*& ec_head = get_thread_ec_head_ref();
  _previous = ec_head;
  ec_head = this;
}

EcEntryBase::~EcEntryBase() { get_thread_ec_head_ref() = _previous; }

// ------------------------------------------------------------------------

Text ec_to_text(const char* value)
{
  // Add quotes around the string to make it obvious where it begins and ends.
  // This is great for detecting erroneous leading or trailing spaces in e.g.
  // an identifier.
  auto str = "\"" + std::string(value) + "\"";
  return Text{STRDUP(str.c_str())};
}

Text ec_to_text(char c)
{
  // Add quotes around the character to make it obvious where it begins and
  // ends.
std::string str = "'"; auto write_hex_digit = [&](unsigned num) { if (num < 10u) { str += char('0' + num); } else { str += char('a' + num - 10); } }; auto write_hex_16 = [&](uint16_t n) { write_hex_digit((n >> 12u) & 0x0f); write_hex_digit((n >> 8u) & 0x0f); write_hex_digit((n >> 4u) & 0x0f); write_hex_digit((n >> 0u) & 0x0f); }; if (c == '\\') { str += "\\\\"; } else if (c == '\"') { str += "\\\""; } else if (c == '\'') { str += "\\\'"; } else if (c == '\0') { str += "\\0"; } else if (c == '\b') { str += "\\b"; } else if (c == '\f') { str += "\\f"; } else if (c == '\n') { str += "\\n"; } else if (c == '\r') { str += "\\r"; } else if (c == '\t') { str += "\\t"; } else if (0 <= c && c < 0x20) { str += "\\u"; write_hex_16(static_cast(c)); } else { str += c; } str += "'"; return Text{STRDUP(str.c_str())}; } #define DEFINE_EC(Type) \ Text ec_to_text(Type value) \ { \ auto str = std::to_string(value); \ return Text{STRDUP(str.c_str())}; \ } DEFINE_EC(int) DEFINE_EC(unsigned int) DEFINE_EC(long) DEFINE_EC(unsigned long) DEFINE_EC(long long) DEFINE_EC(unsigned long long) DEFINE_EC(float) DEFINE_EC(double) DEFINE_EC(long double) #undef DEFINE_EC Text ec_to_text(EcHandle ec_handle) { Text parent_ec = get_error_context_for(ec_handle); size_t buffer_size = strlen(parent_ec.c_str()) + 2; char* with_newline = reinterpret_cast(malloc(buffer_size)); with_newline[0] = '\n'; #ifdef _WIN32 strncpy_s(with_newline + 1, buffer_size, parent_ec.c_str(), buffer_size - 2); #else strcpy(with_newline + 1, parent_ec.c_str()); #endif return Text(with_newline); } // ---------------------------------------------------------------------------- } // namespace loguru // ---------------------------------------------------------------------------- // .dP"Y8 88 dP""b8 88b 88 db 88 .dP"Y8 // `Ybo." 88 dP `" 88Yb88 dPYb 88 `Ybo." // o.`Y8b 88 Yb "88 88 Y88 dP__Yb 88 .o o.`Y8b // 8bodP' 88 YboodP 88 Y8 dP""""Yb 88ood8 8bodP' // ---------------------------------------------------------------------------- #ifdef _WIN32 namespace loguru { void install_signal_handlers(const SignalOptions& signal_options) { (void)signal_options; // TODO: implement signal handlers on windows } } // namespace loguru #else // _WIN32 namespace loguru { void write_to_stderr(const char* data, size_t size) { auto result = write(STDERR_FILENO, data, size); (void)result; // Ignore errors. } void write_to_stderr(const char* data) { write_to_stderr(data, strlen(data)); } void call_default_signal_handler(int signal_number) { struct sigaction sig_action; memset(&sig_action, 0, sizeof(sig_action)); sigemptyset(&sig_action.sa_mask); sig_action.sa_handler = SIG_DFL; sigaction(signal_number, &sig_action, NULL); kill(getpid(), signal_number); } void signal_handler(int signal_number, siginfo_t*, void*) { const char* signal_name = "UNKNOWN SIGNAL"; if (signal_number == SIGABRT) { signal_name = "SIGABRT"; } if (signal_number == SIGBUS) { signal_name = "SIGBUS"; } if (signal_number == SIGFPE) { signal_name = "SIGFPE"; } if (signal_number == SIGILL) { signal_name = "SIGILL"; } if (signal_number == SIGINT) { signal_name = "SIGINT"; } if (signal_number == SIGSEGV) { signal_name = "SIGSEGV"; } if (signal_number == SIGTERM) { signal_name = "SIGTERM"; } // -------------------------------------------------------------------- /* There are few things that are safe to do in a signal handler, but writing to stderr is one of them. So we first print out what happened to stderr so we're sure that gets out, then we do the unsafe things, like logging the stack trace. 
*/ if (g_colorlogtostderr && s_terminal_has_color) { write_to_stderr(terminal_reset()); write_to_stderr(terminal_bold()); write_to_stderr(terminal_light_red()); } write_to_stderr("\n"); write_to_stderr("Loguru caught a signal: "); write_to_stderr(signal_name); write_to_stderr("\n"); if (g_colorlogtostderr && s_terminal_has_color) { write_to_stderr(terminal_reset()); } // -------------------------------------------------------------------- if (s_signal_options.unsafe_signal_handler) { // -------------------------------------------------------------------- /* Now we do unsafe things. This can for example lead to deadlocks if the signal was triggered from the system's memory management functions and the code below tries to do allocations. */ flush(); char preamble_buff[LOGURU_PREAMBLE_WIDTH]; print_preamble(preamble_buff, sizeof(preamble_buff), Verbosity_FATAL, "", 0); auto message = Message{Verbosity_FATAL, "", 0, preamble_buff, "", "Signal: ", signal_name}; try { log_message(1, message, false, false); } catch (...) { // This can happed due to s_fatal_handler. write_to_stderr( "Exception caught and ignored by Loguru signal handler.\n"); } flush(); // -------------------------------------------------------------------- } call_default_signal_handler(signal_number); } void install_signal_handlers(const SignalOptions& signal_options) { s_signal_options = signal_options; struct sigaction sig_action; memset(&sig_action, 0, sizeof(sig_action)); sigemptyset(&sig_action.sa_mask); sig_action.sa_flags |= SA_SIGINFO; sig_action.sa_sigaction = &signal_handler; if (signal_options.sigabrt) { CHECK_F(sigaction(SIGABRT, &sig_action, NULL) != -1, "Failed to install handler for SIGABRT"); } if (signal_options.sigbus) { CHECK_F(sigaction(SIGBUS, &sig_action, NULL) != -1, "Failed to install handler for SIGBUS"); } if (signal_options.sigfpe) { CHECK_F(sigaction(SIGFPE, &sig_action, NULL) != -1, "Failed to install handler for SIGFPE"); } if (signal_options.sigill) { CHECK_F(sigaction(SIGILL, &sig_action, NULL) != -1, "Failed to install handler for SIGILL"); } if (signal_options.sigint) { CHECK_F(sigaction(SIGINT, &sig_action, NULL) != -1, "Failed to install handler for SIGINT"); } if (signal_options.sigsegv) { CHECK_F(sigaction(SIGSEGV, &sig_action, NULL) != -1, "Failed to install handler for SIGSEGV"); } if (signal_options.sigterm) { CHECK_F(sigaction(SIGTERM, &sig_action, NULL) != -1, "Failed to install handler for SIGTERM"); } } } // namespace loguru #endif // _WIN32 #ifdef _WIN32 #ifdef _MSC_VER #pragma warning(pop) #endif // _MSC_VER #endif // _WIN32 #endif // LOGURU_IMPLEMENTATION basix-0.3.0/cpp/basix/loguru.hpp000066400000000000000000001600041411115224000165260ustar00rootroot00000000000000/* Loguru logging library for C++, by Emil Ernerfeldt. www.github.com/emilk/loguru If you find Loguru useful, please let me know on twitter or in a mail! Twitter: @ernerfeldt Mail: emil.ernerfeldt@gmail.com Website: www.ilikebigbits.com # License This software is in the public domain. Where that dedication is not recognized, you are granted a perpetual, irrevocable license to copy, modify and distribute it as you see fit. # Inspiration Much of Loguru was inspired by GLOG, https://code.google.com/p/google-glog/. The choice of public domain is fully due Sean T. Barrett and his wonderful stb libraries at https://github.com/nothings/stb. # Version history * Version 0.1.0 - 2015-03-22 - Works great on Mac. * Version 0.2.0 - 2015-09-17 - Removed the only dependency. 
* Version 0.3.0 - 2015-10-02 - Drop-in replacement for most of GLOG * Version 0.4.0 - 2015-10-07 - Single-file! * Version 0.5.0 - 2015-10-17 - Improved file logging * Version 0.6.0 - 2015-10-24 - Add stack traces * Version 0.7.0 - 2015-10-27 - Signals * Version 0.8.0 - 2015-10-30 - Color logging. * Version 0.9.0 - 2015-11-26 - ABORT_S and proper handling of FATAL * Version 1.0.0 - 2016-02-14 - ERROR_CONTEXT * Version 1.1.0 - 2016-02-19 - -v OFF, -v INFO etc * Version 1.1.1 - 2016-02-20 - textprintf vs strprintf * Version 1.1.2 - 2016-02-22 - Remove g_alsologtostderr * Version 1.1.3 - 2016-02-29 - ERROR_CONTEXT as linked list * Version 1.2.0 - 2016-03-19 - Add get_thread_name() * Version 1.2.1 - 2016-03-20 - Minor fixes * Version 1.2.2 - 2016-03-29 - Fix issues with set_fatal_handler throwing an exception * Version 1.2.3 - 2016-05-16 - Log current working directory in loguru::init(). * Version 1.2.4 - 2016-05-18 - Custom replacement for -v in loguru::init() by bjoernpollex * Version 1.2.5 - 2016-05-18 - Add ability to print ERROR_CONTEXT of parent thread. * Version 1.2.6 - 2016-05-19 - Bug fix regarding VLOG verbosity argument lacking (). * Version 1.2.7 - 2016-05-23 - Fix PATH_MAX problem. * Version 1.2.8 - 2016-05-26 - Add shutdown() and remove_all_callbacks() * Version 1.2.9 - 2016-06-09 - Use a monotonic clock for uptime. * Version 1.3.0 - 2016-07-20 - Fix issues with callback flush/close not being called. * Version 1.3.1 - 2016-07-20 - Add LOGURU_UNSAFE_SIGNAL_HANDLER to toggle stacktrace on signals. * Version 1.3.2 - 2016-07-20 - Add loguru::arguments() * Version 1.4.0 - 2016-09-15 - Semantic versioning + add loguru::create_directories * Version 1.4.1 - 2016-09-29 - Customize formating with LOGURU_FILENAME_WIDTH * Version 1.5.0 - 2016-12-22 - LOGURU_USE_FMTLIB by kolis and LOGURU_WITH_FILEABS by scinart * Version 1.5.1 - 2017-08-08 - Terminal colors on Windows 10 thanks to looki * Version 1.6.0 - 2018-01-03 - Add LOGURU_RTTI and LOGURU_STACKTRACES settings * Version 1.7.0 - 2018-01-03 - Add ability to turn off the preamble with loguru::g_preamble * Version 1.7.1 - 2018-04-05 - Add function get_fatal_handler * Version 1.7.2 - 2018-04-22 - Fix a bug where large file names could cause stack corruption (thanks @ccamporesi) * Version 1.8.0 - 2018-04-23 - Shorten long file names to keep preamble fixed width * Version 1.9.0 - 2018-09-22 - Adjust terminal colors, add LOGURU_VERBOSE_SCOPE_ENDINGS, add LOGURU_SCOPE_TIME_PRECISION, add named log levels * Version 2.0.0 - 2018-09-22 - Split loguru.hpp into loguru.hpp and loguru.cpp * Version 2.1.0 - 2019-09-23 - Update fmtlib + add option to loguru::init to NOT set main thread name. * Version 2.2.0 - 2020-07-31 - Replace LOGURU_CATCH_SIGABRT with struct SignalOptions # Compiling Just include where you want to use Loguru. Then, in one .cpp file #include Make sure you compile with -std=c++11 -lstdc++ -lpthread -ldl # Usage For details, please see the official documentation at emilk.github.io/loguru #include int main(int argc, char* argv[]) { loguru::init(argc, argv); // Put every log message in "everything.log": loguru::add_file("everything.log", loguru::Append, loguru::Verbosity_MAX); LOG_F(INFO, "The magic number is %d", 42); } */ #if defined(LOGURU_IMPLEMENTATION) #error \ "You are defining LOGURU_IMPLEMENTATION. This is for older versions of Loguru. 
You should now instead include loguru.cpp (or build it and link with it)" #endif // Disable all warnings from gcc/clang: #if defined(__clang__) #pragma clang system_header #elif defined(__GNUC__) #pragma GCC system_header #endif #ifndef LOGURU_HAS_DECLARED_FORMAT_HEADER #define LOGURU_HAS_DECLARED_FORMAT_HEADER // Semantic versioning. Loguru version can be printed with printf("%d.%d.%d", // LOGURU_VERSION_MAJOR, LOGURU_VERSION_MINOR, LOGURU_VERSION_PATCH); #define LOGURU_VERSION_MAJOR 2 #define LOGURU_VERSION_MINOR 1 #define LOGURU_VERSION_PATCH 0 #if defined(_MSC_VER) #include // Needed for _In_z_ etc annotations #endif #if defined(__linux__) || defined(__APPLE__) #define LOGURU_SYSLOG 1 #else #define LOGURU_SYSLOG 0 #endif // ---------------------------------------------------------------------------- #ifndef LOGURU_EXPORT // Define to your project's export declaration if needed for use in a shared // library. #define LOGURU_EXPORT #endif #ifndef LOGURU_SCOPE_TEXT_SIZE // Maximum length of text that can be printed by a LOG_SCOPE. // This should be long enough to get most things, but short enough not to // clutter the stack. #define LOGURU_SCOPE_TEXT_SIZE 196 #endif #ifndef LOGURU_FILENAME_WIDTH // Width of the column containing the file name #define LOGURU_FILENAME_WIDTH 23 #endif #ifndef LOGURU_THREADNAME_WIDTH // Width of the column containing the thread name #define LOGURU_THREADNAME_WIDTH 16 #endif #ifndef LOGURU_SCOPE_TIME_PRECISION // Resolution of scope timers. 3=ms, 6=us, 9=ns #define LOGURU_SCOPE_TIME_PRECISION 3 #endif #ifdef LOGURU_CATCH_SIGABRT #error \ "You are defining LOGURU_CATCH_SIGABRT. his is for older versions of Loguru. You should now instead set the options passed to loguru::init" #endif #ifndef LOGURU_VERBOSE_SCOPE_ENDINGS // Show milliseconds and scope name at end of scope. #define LOGURU_VERBOSE_SCOPE_ENDINGS 1 #endif #ifndef LOGURU_REDEFINE_ASSERT #define LOGURU_REDEFINE_ASSERT 0 #endif #ifndef LOGURU_WITH_STREAMS #define LOGURU_WITH_STREAMS 0 #endif #ifndef LOGURU_REPLACE_GLOG #define LOGURU_REPLACE_GLOG 0 #endif #if LOGURU_REPLACE_GLOG #undef LOGURU_WITH_STREAMS #define LOGURU_WITH_STREAMS 1 #endif #if defined(LOGURU_UNSAFE_SIGNAL_HANDLER) #error \ "You are defining LOGURU_UNSAFE_SIGNAL_HANDLER. This is for older versions of Loguru. You should now instead set the unsafe_signal_handler option when you call loguru::init." #endif #if LOGURU_IMPLEMENTATION #undef LOGURU_WITH_STREAMS #define LOGURU_WITH_STREAMS 1 #endif #ifndef LOGURU_USE_FMTLIB #define LOGURU_USE_FMTLIB 0 #endif #ifndef LOGURU_WITH_FILEABS #define LOGURU_WITH_FILEABS 0 #endif #ifndef LOGURU_RTTI #if defined(__clang__) #if __has_feature(cxx_rtti) #define LOGURU_RTTI 1 #endif #elif defined(__GNUG__) #if defined(__GXX_RTTI) #define LOGURU_RTTI 1 #endif #elif defined(_MSC_VER) #if defined(_CPPRTTI) #define LOGURU_RTTI 1 #endif #endif #endif // -------------------------------------------------------------------- // Utility macros #define LOGURU_CONCATENATE_IMPL(s1, s2) s1##s2 #define LOGURU_CONCATENATE(s1, s2) LOGURU_CONCATENATE_IMPL(s1, s2) #ifdef __COUNTER__ #define LOGURU_ANONYMOUS_VARIABLE(str) LOGURU_CONCATENATE(str, __COUNTER__) #else #define LOGURU_ANONYMOUS_VARIABLE(str) LOGURU_CONCATENATE(str, __LINE__) #endif #if defined(__clang__) || defined(__GNUC__) // Helper macro for declaring functions as having similar signature to printf. // This allows the compiler to catch format errors at compile-time. 
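// For example, a declaration such as
//     void my_log(const char* fmt, ...) LOGURU_PRINTF_LIKE(1, 2);
// lets GCC/Clang warn when the arguments do not match the format string.
// ("my_log" is purely illustrative; Loguru itself applies this macro to
// textprintf, log, raw_log, etc. further down in this header.)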
#define LOGURU_PRINTF_LIKE(fmtarg, firstvararg) \ __attribute__((__format__(__printf__, fmtarg, firstvararg))) #define LOGURU_FORMAT_STRING_TYPE const char* #elif defined(_MSC_VER) #define LOGURU_PRINTF_LIKE(fmtarg, firstvararg) #define LOGURU_FORMAT_STRING_TYPE _In_z_ _Printf_format_string_ const char* #else #define LOGURU_PRINTF_LIKE(fmtarg, firstvararg) #define LOGURU_FORMAT_STRING_TYPE const char* #endif // Used to mark log_and_abort for the benefit of the static analyzer and // optimizer. #if defined(_MSC_VER) #define LOGURU_NORETURN __declspec(noreturn) #else #define LOGURU_NORETURN __attribute__((noreturn)) #endif #if defined(_MSC_VER) #define LOGURU_PREDICT_FALSE(x) (x) #define LOGURU_PREDICT_TRUE(x) (x) #else #define LOGURU_PREDICT_FALSE(x) (__builtin_expect(x, 0)) #define LOGURU_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1)) #endif #if LOGURU_USE_FMTLIB #include #define LOGURU_FMT(x) "{:" #x "}" #else #define LOGURU_FMT(x) "%" #x #endif #ifdef _WIN32 #define STRDUP(str) _strdup(str) #else #define STRDUP(str) strdup(str) #endif // -------------------------------------------------------------------- namespace loguru { // Simple RAII ownership of a char*. class LOGURU_EXPORT Text { public: explicit Text(char* owned_str) : _str(owned_str) {} ~Text(); Text(Text&& t) { _str = t._str; t._str = nullptr; } Text(Text& t) = delete; Text& operator=(Text& t) = delete; void operator=(Text&& t) = delete; const char* c_str() const { return _str; } bool empty() const { return _str == nullptr || *_str == '\0'; } char* release() { auto result = _str; _str = nullptr; return result; } private: char* _str; }; // Like printf, but returns the formated text. #if LOGURU_USE_FMTLIB LOGURU_EXPORT Text vtextprintf(const char* format, fmt::format_args args); template LOGURU_EXPORT Text textprintf(LOGURU_FORMAT_STRING_TYPE format, const Args&... args) { return vtextprintf(format, fmt::make_format_args(args...)); } #else LOGURU_EXPORT Text textprintf(LOGURU_FORMAT_STRING_TYPE format, ...) LOGURU_PRINTF_LIKE(1, 2); #endif // Overloaded for variadic template matching. LOGURU_EXPORT Text textprintf(); using Verbosity = int; #undef FATAL #undef ERROR #undef WARNING #undef INFO #undef MAX enum NamedVerbosity : Verbosity { // Used to mark an invalid verbosity. Do not log to this level. Verbosity_INVALID = -10, // Never do LOG_F(INVALID) // You may use Verbosity_OFF on g_stderr_verbosity, but for nothing else! Verbosity_OFF = -9, // Never do LOG_F(OFF) // Prefer to use ABORT_F or ABORT_S over LOG_F(FATAL) or LOG_S(FATAL). Verbosity_FATAL = -3, Verbosity_ERROR = -2, Verbosity_WARNING = -1, // Normal messages. By default written to stderr. Verbosity_INFO = 0, // Same as Verbosity_INFO in every way. Verbosity_0 = 0, // Verbosity levels 1-9 are generally not written to stderr, but are written // to file. Verbosity_1 = +1, Verbosity_2 = +2, Verbosity_3 = +3, Verbosity_4 = +4, Verbosity_5 = +5, Verbosity_6 = +6, Verbosity_7 = +7, Verbosity_8 = +8, Verbosity_9 = +9, // Don not use higher verbosity levels, as that will make grepping log files // harder. Verbosity_MAX = +9, }; struct Message { // You would generally print a Message by just concating the buffers without // spacing. Optionally, ignore preamble and indentation. Verbosity verbosity; // Already part of preamble const char* filename; // Already part of preamble unsigned line; // Already part of preamble const char* preamble; // Date, time, uptime, thread, file:line, verbosity. const char* indentation; // Just a bunch of spacing. 
const char* prefix; // Assertion failure info goes here (or ""). const char* message; // User message goes here. }; /* Everything with a verbosity equal or greater than g_stderr_verbosity will be written to stderr. You can set this in code or via the -v argument. Set to loguru::Verbosity_OFF to write nothing to stderr. Default is 0, i.e. only log ERROR, WARNING and INFO are written to stderr. */ LOGURU_EXPORT extern Verbosity g_stderr_verbosity; LOGURU_EXPORT extern bool g_colorlogtostderr; // True by default. LOGURU_EXPORT extern unsigned g_flush_interval_ms; // 0 (unbuffered) by default. LOGURU_EXPORT extern bool g_preamble_header; // Prepend each log start by a descriptions line with all // columns name? True by default. LOGURU_EXPORT extern bool g_preamble; // Prefix each log line with date, time etc? True by default. /* Specify the verbosity used by loguru to log its info messages including the header logged when logged::init() is called or on exit. Default is 0 (INFO). */ LOGURU_EXPORT extern Verbosity g_internal_verbosity; // Turn off individual parts of the preamble LOGURU_EXPORT extern bool g_preamble_date; // The date field LOGURU_EXPORT extern bool g_preamble_time; // The time of the current day LOGURU_EXPORT extern bool g_preamble_uptime; // The time since init call LOGURU_EXPORT extern bool g_preamble_thread; // The logging thread LOGURU_EXPORT extern bool g_preamble_file; // The file from which the log originates from LOGURU_EXPORT extern bool g_preamble_verbose; // The verbosity field LOGURU_EXPORT extern bool g_preamble_pipe; // The pipe symbol right before the message // May not throw! typedef void (*log_handler_t)(void* user_data, const Message& message); typedef void (*close_handler_t)(void* user_data); typedef void (*flush_handler_t)(void* user_data); // May throw if that's how you'd like to handle your errors. typedef void (*fatal_handler_t)(const Message& message); // Given a verbosity level, return the level's name or nullptr. typedef const char* (*verbosity_to_name_t)(Verbosity verbosity); // Given a verbosity level name, return the verbosity level or // Verbosity_INVALID if name is not recognized. typedef Verbosity (*name_to_verbosity_t)(const char* name); struct SignalOptions { /// Make Loguru try to do unsafe but useful things, /// like printing a stack trace, when catching signals. /// This may lead to bad things like deadlocks in certain situations. bool unsafe_signal_handler = true; /// Should Loguru catch SIGABRT ? bool sigabrt = true; /// Should Loguru catch SIGBUS ? bool sigbus = true; /// Should Loguru catch SIGFPE ? bool sigfpe = true; /// Should Loguru catch SIGILL ? bool sigill = true; /// Should Loguru catch SIGINT ? bool sigint = true; /// Should Loguru catch SIGSEGV ? bool sigsegv = true; /// Should Loguru catch SIGTERM ? bool sigterm = true; static SignalOptions none() { SignalOptions options; options.unsafe_signal_handler = false; options.sigabrt = false; options.sigbus = false; options.sigfpe = false; options.sigill = false; options.sigint = false; options.sigsegv = false; options.sigterm = false; return options; } }; // Runtime options passed to loguru::init struct Options { // This allows you to use something else instead of "-v" via verbosity_flag. // Set to nullptr to if you don't want Loguru to parse verbosity from the // args.' const char* verbosity_flag = "-v"; // loguru::init will set the name of the calling thread to this. // If you don't want Loguru to set the name of the main thread, // set this to nullptr. 
// NOTE: on SOME platforms loguru::init will only overwrite the thread name // if a thread name has not already been set. // To always set a thread name, use loguru::set_thread_name instead. const char* main_thread_name = "main thread"; SignalOptions signals; }; /* Should be called from the main thread. You don't *need* to call this, but if you do you get: * Signal handlers installed * Program arguments logged * Working dir logged * Optional -v verbosity flag parsed * Main thread name set to "main thread" * Explanation of the preamble (date, threanmae etc) logged loguru::init() will look for arguments meant for loguru and remove them. Arguments meant for loguru are: -v n Set loguru::g_stderr_verbosity level. Examples: -v 3 Show verbosity level 3 and lower. -v 0 Only show INFO, WARNING, ERROR, FATAL (default). -v INFO Only show INFO, WARNING, ERROR, FATAL (default). -v WARNING Only show WARNING, ERROR, FATAL. -v ERROR Only show ERROR, FATAL. -v FATAL Only show FATAL. -v OFF Turn off logging to stderr. Tip: You can set g_stderr_verbosity before calling loguru::init. That way you can set the default but have the user override it with the -v flag. Note that -v does not affect file logging (see loguru::add_file). You can you something other than the -v flag by setting the verbosity_flag option. */ LOGURU_EXPORT void init(int& argc, char* argv[], const Options& options = {}); // Will call remove_all_callbacks(). After calling this, logging will still go // to stderr. You generally don't need to call this. LOGURU_EXPORT void shutdown(); // What ~ will be replaced with, e.g. "/home/your_user_name/" LOGURU_EXPORT const char* home_dir(); /* Returns the name of the app as given in argv[0] but without leading path. That is, if argv[0] is "../foo/app" this will return "app". */ LOGURU_EXPORT const char* argv0_filename(); // Returns all arguments given to loguru::init(), but escaped with a single // space as separator. LOGURU_EXPORT const char* arguments(); // Returns the path to the current working dir when loguru::init() was called. LOGURU_EXPORT const char* current_dir(); // Returns the part of the path after the last / or \ (if any). LOGURU_EXPORT const char* filename(const char* path); // e.g. "foo/bar/baz.ext" will create the directories "foo/" and "foo/bar/" LOGURU_EXPORT bool create_directories(const char* file_path_const); // Writes date and time with millisecond precision, e.g. "20151017_161503.123" LOGURU_EXPORT void write_date_time(char* buff, unsigned buff_size); // Helper: thread-safe version strerror LOGURU_EXPORT Text errno_as_text(); /* Given a prefix of e.g. "~/loguru/" this might return "/home/your_username/loguru/app_name/20151017_161503.123.log" where "app_name" is a sanitized version of argv[0]. */ LOGURU_EXPORT void suggest_log_path(const char* prefix, char* buff, unsigned buff_size); enum FileMode { Truncate, Append }; /* Will log to a file at the given path. Any logging message with a verbosity lower or equal to the given verbosity will be included. The function will create all directories in 'path' if needed. If path starts with a ~, it will be replaced with loguru::home_dir() To stop the file logging, just call loguru::remove_callback(path) with the same path. */ LOGURU_EXPORT bool add_file(const char* path, FileMode mode, Verbosity verbosity); LOGURU_EXPORT // Send logs to syslog with LOG_USER facility (see next call) bool add_syslog(const char* app_name, Verbosity verbosity); LOGURU_EXPORT // Send logs to syslog with your own choice of facility (LOG_USER, LOG_AUTH, // ...) 
see loguru.cpp: syslog_log() for more details. bool add_syslog(const char* app_name, Verbosity verbosity, int facility); /* Will be called right before abort(). You can for instance use this to print custom error messages, or throw an exception. Feel free to call LOG:ing function from this, but not FATAL ones! */ LOGURU_EXPORT void set_fatal_handler(fatal_handler_t handler); // Get the current fatal handler, if any. Default value is nullptr. LOGURU_EXPORT fatal_handler_t get_fatal_handler(); /* Will be called on each log messages with a verbosity less or equal to the given one. Useful for displaying messages on-screen in a game, for example. The given on_close is also expected to flush (if desired). */ LOGURU_EXPORT void add_callback(const char* id, log_handler_t callback, void* user_data, Verbosity verbosity, close_handler_t on_close = nullptr, flush_handler_t on_flush = nullptr); /* Set a callback that returns custom verbosity level names. If callback is nullptr or returns nullptr, default log names will be used. */ LOGURU_EXPORT void set_verbosity_to_name_callback(verbosity_to_name_t callback); /* Set a callback that returns the verbosity level matching a name. The callback should return Verbosity_INVALID if the name is not recognized. */ LOGURU_EXPORT void set_name_to_verbosity_callback(name_to_verbosity_t callback); /* Get a custom name for a specific verbosity, if one exists, or nullptr. */ LOGURU_EXPORT const char* get_verbosity_name(Verbosity verbosity); /* Get the verbosity enum value from a custom 4-character level name, if one exists. If the name does not match a custom level name, Verbosity_INVALID is returned. */ LOGURU_EXPORT Verbosity get_verbosity_from_name(const char* name); // Returns true iff the callback was found (and removed). LOGURU_EXPORT bool remove_callback(const char* id); // Shut down all file logging and any other callback hooks installed. LOGURU_EXPORT void remove_all_callbacks(); // Returns the maximum of g_stderr_verbosity and all file/custom outputs. LOGURU_EXPORT Verbosity current_verbosity_cutoff(); #if LOGURU_USE_FMTLIB // Internal functions LOGURU_EXPORT void vlog(Verbosity verbosity, const char* file, unsigned line, LOGURU_FORMAT_STRING_TYPE format, fmt::format_args args); LOGURU_EXPORT void raw_vlog(Verbosity verbosity, const char* file, unsigned line, LOGURU_FORMAT_STRING_TYPE format, fmt::format_args args); // Actual logging function. Use the LOG macro instead of calling this directly. template LOGURU_EXPORT void log(Verbosity verbosity, const char* file, unsigned line, LOGURU_FORMAT_STRING_TYPE format, const Args&... args) { vlog(verbosity, file, line, format, fmt::make_format_args(args...)); } // Log without any preamble or indentation. template LOGURU_EXPORT void raw_log(Verbosity verbosity, const char* file, unsigned line, LOGURU_FORMAT_STRING_TYPE format, const Args&... args) { raw_vlog(verbosity, file, line, format, fmt::make_format_args(args...)); } #else // LOGURU_USE_FMTLIB? // Actual logging function. Use the LOG macro instead of calling this // directly. LOGURU_EXPORT void log(Verbosity verbosity, const char* file, unsigned line, LOGURU_FORMAT_STRING_TYPE format, ...) LOGURU_PRINTF_LIKE(4, 5); // Log without any preamble or indentation. LOGURU_EXPORT void raw_log(Verbosity verbosity, const char* file, unsigned line, LOGURU_FORMAT_STRING_TYPE format, ...) 
LOGURU_PRINTF_LIKE(4, 5); #endif // !LOGURU_USE_FMTLIB // Helper class for LOG_SCOPE_F class LOGURU_EXPORT LogScopeRAII { public: LogScopeRAII() : _file(nullptr) {} // No logging LogScopeRAII(Verbosity verbosity, const char* file, unsigned line, LOGURU_FORMAT_STRING_TYPE format, ...) LOGURU_PRINTF_LIKE(5, 6); ~LogScopeRAII(); #if defined(_MSC_VER) && _MSC_VER > 1800 // older MSVC default move ctors close the scope on move. See // issue #43 LogScopeRAII(LogScopeRAII&& other) : _verbosity(other._verbosity), _file(other._file), _line(other._line), _indent_stderr(other._indent_stderr), _start_time_ns(other._start_time_ns) { // Make sure the tmp object's destruction doesn't close the scope: other._file = nullptr; for (unsigned int i = 0; i < LOGURU_SCOPE_TEXT_SIZE; ++i) { _name[i] = other._name[i]; } } #else LogScopeRAII(LogScopeRAII&&) = default; #endif private: LogScopeRAII(const LogScopeRAII&) = delete; LogScopeRAII& operator=(const LogScopeRAII&) = delete; void operator=(LogScopeRAII&&) = delete; Verbosity _verbosity; const char* _file; // Set to null if we are disabled due to verbosity unsigned _line; bool _indent_stderr; // Did we? long long _start_time_ns; char _name[LOGURU_SCOPE_TEXT_SIZE]; }; // Marked as 'noreturn' for the benefit of the static analyzer and optimizer. // stack_trace_skip is the number of extrace stack frames to skip above // log_and_abort. #if LOGURU_USE_FMTLIB LOGURU_EXPORT LOGURU_NORETURN void vlog_and_abort(int stack_trace_skip, const char* expr, const char* file, unsigned line, LOGURU_FORMAT_STRING_TYPE format, fmt::format_args); template LOGURU_EXPORT LOGURU_NORETURN void log_and_abort(int stack_trace_skip, const char* expr, const char* file, unsigned line, LOGURU_FORMAT_STRING_TYPE format, const Args&... args) { vlog_and_abort(stack_trace_skip, expr, file, line, format, fmt::make_format_args(args...)); } #else LOGURU_EXPORT LOGURU_NORETURN void log_and_abort(int stack_trace_skip, const char* expr, const char* file, unsigned line, LOGURU_FORMAT_STRING_TYPE format, ...) LOGURU_PRINTF_LIKE(5, 6); #endif LOGURU_EXPORT LOGURU_NORETURN void log_and_abort(int stack_trace_skip, const char* expr, const char* file, unsigned line); // Flush output to stderr and files. // If g_flush_interval_ms is set to non-zero, this will be called automatically // this often. If not set, you do not need to call this at all. LOGURU_EXPORT void flush(); template inline Text format_value(const T&) { return textprintf("N/A"); } template <> inline Text format_value(const char& v) { return textprintf(LOGURU_FMT(c), v); } template <> inline Text format_value(const int& v) { return textprintf(LOGURU_FMT(d), v); } template <> inline Text format_value(const unsigned int& v) { return textprintf(LOGURU_FMT(u), v); } template <> inline Text format_value(const long& v) { return textprintf(LOGURU_FMT(lu), v); } template <> inline Text format_value(const unsigned long& v) { return textprintf(LOGURU_FMT(ld), v); } template <> inline Text format_value(const long long& v) { return textprintf(LOGURU_FMT(llu), v); } template <> inline Text format_value(const unsigned long long& v) { return textprintf(LOGURU_FMT(lld), v); } template <> inline Text format_value(const float& v) { return textprintf(LOGURU_FMT(f), v); } template <> inline Text format_value(const double& v) { return textprintf(LOGURU_FMT(f), v); } /* Thread names can be set for the benefit of readable logs. If you do not set the thread name, a hex id will be shown instead. 
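   A small sketch of naming a worker thread (illustrative only; the thread and
   its body are user code, not part of Loguru):

       std::thread worker([]{
           loguru::set_thread_name("worker 1");
           LOG_F(INFO, "hello from the worker");
       });
       worker.join();
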
These thread names may or may not be the same as the system thread names, depending on the system. Try to limit the thread name to 15 characters or less. */ LOGURU_EXPORT void set_thread_name(const char* name); /* Returns the thread name for this thread. On most *nix systems this will return the system thread name (settable from both within and without Loguru). On other systems it will return whatever you set in `set_thread_name()`; If no thread name is set, this will return a hexadecimal thread id. `length` should be the number of bytes available in the buffer. 17 is a good number for length. `right_align_hex_id` means any hexadecimal thread id will be written to the end of buffer. */ LOGURU_EXPORT void get_thread_name(char* buffer, unsigned long long length, bool right_align_hex_id); /* Generates a readable stacktrace as a string. 'skip' specifies how many stack frames to skip. For instance, the default skip (1) means: don't include the call to loguru::stacktrace in the stack trace. */ LOGURU_EXPORT Text stacktrace(int skip = 1); /* Add a string to be replaced with something else in the stack output. For instance, instead of having a stack trace look like this: 0x41f541 some_function(std::basic_ofstream >&) You can clean it up with: auto verbose_type_name = loguru::demangle(typeid(std::ofstream).name()); loguru::add_stack_cleanup(verbose_type_name.c_str(); "std::ofstream"); So the next time you will instead see: 0x41f541 some_function(std::ofstream&) `replace_with_this` must be shorter than `find_this`. */ LOGURU_EXPORT void add_stack_cleanup(const char* find_this, const char* replace_with_this); // Example: demangle(typeid(std::ofstream).name()) -> "std::basic_ofstream >" LOGURU_EXPORT Text demangle(const char* name); // ------------------------------------------------------------------------ /* Not all terminals support colors, but if they do, and g_colorlogtostderr is set, Loguru will write them to stderr to make errors in red, etc. You also have the option to manually use them, via the function below. Note, however, that if you do, the color codes could end up in your logfile! This means if you intend to use them functions you should either: * Use them on the stderr/stdout directly (bypass Loguru). * Don't add file outputs to Loguru. * Expect some \e[1m things in your logfile. Usage: printf("%sRed%sGreen%sBold green%sClear again\n", loguru::terminal_red(), loguru::terminal_green(), loguru::terminal_bold(), loguru::terminal_reset()); If the terminal at hand does not support colors the above output will just not have funky \e[1m things showing. */ // Do the output terminal support colors? LOGURU_EXPORT bool terminal_has_color(); // Colors LOGURU_EXPORT const char* terminal_black(); LOGURU_EXPORT const char* terminal_red(); LOGURU_EXPORT const char* terminal_green(); LOGURU_EXPORT const char* terminal_yellow(); LOGURU_EXPORT const char* terminal_blue(); LOGURU_EXPORT const char* terminal_purple(); LOGURU_EXPORT const char* terminal_cyan(); LOGURU_EXPORT const char* terminal_light_gray(); LOGURU_EXPORT const char* terminal_light_red(); LOGURU_EXPORT const char* terminal_white(); // Formating LOGURU_EXPORT const char* terminal_bold(); LOGURU_EXPORT const char* terminal_underline(); // You should end each line with this! LOGURU_EXPORT const char* terminal_reset(); // -------------------------------------------------------------------- // Error context related: struct StringStream; // Use this in your EcEntryBase::print_value overload. 
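// For instance, a custom entry type might implement the overload roughly as
//     void print_value(loguru::StringStream& ss) const override
//     { loguru::stream_print(ss, _text.c_str()); }
// where `_text` is a hypothetical member of that custom type, not a Loguru
// symbol.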
LOGURU_EXPORT void stream_print(StringStream& out_string_stream, const char* text); class LOGURU_EXPORT EcEntryBase { public: EcEntryBase(const char* file, unsigned line, const char* descr); ~EcEntryBase(); EcEntryBase(const EcEntryBase&) = delete; EcEntryBase(EcEntryBase&&) = delete; EcEntryBase& operator=(const EcEntryBase&) = delete; EcEntryBase& operator=(EcEntryBase&&) = delete; virtual void print_value(StringStream& out_string_stream) const = 0; EcEntryBase* previous() const { return _previous; } // private: const char* _file; unsigned _line; const char* _descr; EcEntryBase* _previous; }; template class EcEntryData : public EcEntryBase { public: using Printer = Text (*)(T data); EcEntryData(const char* file, unsigned line, const char* descr, T data, Printer&& printer) : EcEntryBase(file, line, descr), _data(data), _printer(printer) { } virtual void print_value(StringStream& out_string_stream) const override { const auto str = _printer(_data); stream_print(out_string_stream, str.c_str()); } private: T _data; Printer _printer; }; // template // class EcEntryLambda : public EcEntryBase // { // public: // EcEntryLambda(const char* file, unsigned line, const char* descr, // Printer&& printer) : EcEntryBase(file, line, descr), // _printer(std::move(printer)) {} // virtual void print_value(StringStream& out_string_stream) const override // { // const auto str = _printer(); // stream_print(out_string_stream, str.c_str()); // } // private: // Printer _printer; // }; // template // EcEntryLambda make_ec_entry_lambda(const char* file, unsigned line, // const char* descr, Printer&& printer) // { // return {file, line, descr, std::move(printer)}; // } template struct decay_char_array { using type = T; }; template struct decay_char_array { using type = const char*; }; template struct make_const_ptr { using type = T; }; template struct make_const_ptr { using type = const T*; }; template struct make_ec_type { using type = typename make_const_ptr::type>::type; }; /* A stack trace gives you the names of the function at the point of a crash. With ERROR_CONTEXT, you can also get the values of select local variables. Usage: void process_customers(const std::string& filename) { ERROR_CONTEXT("Processing file", filename.c_str()); for (int customer_index : ...) { ERROR_CONTEXT("Customer index", customer_index); ... } } The context is in effect during the scope of the ERROR_CONTEXT. Use loguru::get_error_context() to get the contents of the active error contexts. Example result: ------------------------------------------------ [ErrorContext] main.cpp:416 Processing file: "customers.json" [ErrorContext] main.cpp:417 Customer index: 42 ------------------------------------------------ Error contexts are printed automatically on crashes, and only on crashes. This makes them much faster than logging the value of a variable. */ #define ERROR_CONTEXT(descr, data) \ const loguru::EcEntryData::type> \ LOGURU_ANONYMOUS_VARIABLE(error_context_scope_)( \ __FILE__, __LINE__, descr, data, \ static_cast::type>::Printer>( \ loguru::ec_to_text)) // For better error messages /* #define ERROR_CONTEXT(descr, data) \ const auto LOGURU_ANONYMOUS_VARIABLE(error_context_scope_)( \ loguru::make_ec_entry_lambda(__FILE__, __LINE__, descr, \ [=](){ return loguru::ec_to_text(data); })) */ using EcHandle = const EcEntryBase*; /* Get a light-weight handle to the error context stack on this thread. The handle is valid as long as the current thread has no changes to its error context stack. 
You can pass the handle to loguru::get_error_context on another thread. This can be very useful for when you have a parent thread spawning several working threads, and you want the error context of the parent thread to get printed (too) when there is an error on the child thread. You can accomplish this thusly: void foo(const char* parameter) { ERROR_CONTEXT("parameter", parameter) const auto parent_ec_handle = loguru::get_thread_ec_handle(); std::thread([=]{ loguru::set_thread_name("child thread"); ERROR_CONTEXT("parent context", parent_ec_handle); dangerous_code(); }.join(); } */ LOGURU_EXPORT EcHandle get_thread_ec_handle(); // Get a string describing the current stack of error context. Empty string if // there is none. LOGURU_EXPORT Text get_error_context(); // Get a string describing the error context of the given thread handle. LOGURU_EXPORT Text get_error_context_for(EcHandle ec_handle); // ------------------------------------------------------------------------ LOGURU_EXPORT Text ec_to_text(const char* data); LOGURU_EXPORT Text ec_to_text(char data); LOGURU_EXPORT Text ec_to_text(int data); LOGURU_EXPORT Text ec_to_text(unsigned int data); LOGURU_EXPORT Text ec_to_text(long data); LOGURU_EXPORT Text ec_to_text(unsigned long data); LOGURU_EXPORT Text ec_to_text(long long data); LOGURU_EXPORT Text ec_to_text(unsigned long long data); LOGURU_EXPORT Text ec_to_text(float data); LOGURU_EXPORT Text ec_to_text(double data); LOGURU_EXPORT Text ec_to_text(long double data); LOGURU_EXPORT Text ec_to_text(EcHandle); /* You can add ERROR_CONTEXT support for your own types by overloading ec_to_text. Here's how: some.hpp: namespace loguru { Text ec_to_text(MySmallType data) Text ec_to_text(const MyBigType* data) } // namespace loguru some.cpp: namespace loguru { Text ec_to_text(MySmallType small_value) { // Called only when needed, i.e. on a crash. std::string str = small_value.as_string(); // Format 'small_value' here somehow. return Text{STRDUP(str.c_str())}; } Text ec_to_text(const MyBigType* big_value) { // Called only when needed, i.e. on a crash. std::string str = big_value->as_string(); // Format 'big_value' here somehow. return Text{STRDUP(str.c_str())}; } } // namespace loguru Any file that include some.hpp: void foo(MySmallType small, const MyBigType& big) { ERROR_CONTEXT("Small", small); // Copy ´small` by value. ERROR_CONTEXT("Big", &big); // `big` should not change during this scope! .... } */ } // namespace loguru // -------------------------------------------------------------------- // Logging macros // LOG_F(2, "Only logged if verbosity is 2 or higher: %d", some_number); #define VLOG_F(verbosity, ...) \ ((verbosity) > loguru::current_verbosity_cutoff()) \ ? (void)0 \ : loguru::log(verbosity, __FILE__, __LINE__, __VA_ARGS__) // LOG_F(INFO, "Foo: %d", some_number); #define LOG_F(verbosity_name, ...) \ VLOG_F(loguru::Verbosity_##verbosity_name, __VA_ARGS__) #define VLOG_IF_F(verbosity, cond, ...) \ ((verbosity) > loguru::current_verbosity_cutoff() || (cond) == false) \ ? (void)0 \ : loguru::log(verbosity, __FILE__, __LINE__, __VA_ARGS__) #define LOG_IF_F(verbosity_name, cond, ...) \ VLOG_IF_F(loguru::Verbosity_##verbosity_name, cond, __VA_ARGS__) #define VLOG_SCOPE_F(verbosity, ...) \ loguru::LogScopeRAII LOGURU_ANONYMOUS_VARIABLE(error_context_RAII_) \ = ((verbosity) > loguru::current_verbosity_cutoff()) \ ? loguru::LogScopeRAII() \ : loguru::LogScopeRAII(verbosity, __FILE__, __LINE__, __VA_ARGS__) // Raw logging - no preamble, no indentation. Slightly faster than full logging. 
#define RAW_VLOG_F(verbosity, ...) \ ((verbosity) > loguru::current_verbosity_cutoff()) \ ? (void)0 \ : loguru::raw_log(verbosity, __FILE__, __LINE__, __VA_ARGS__) #define RAW_LOG_F(verbosity_name, ...) \ RAW_VLOG_F(loguru::Verbosity_##verbosity_name, __VA_ARGS__) // Use to book-end a scope. Affects logging on all threads. #define LOG_SCOPE_F(verbosity_name, ...) \ VLOG_SCOPE_F(loguru::Verbosity_##verbosity_name, __VA_ARGS__) #define LOG_SCOPE_FUNCTION(verbosity_name) LOG_SCOPE_F(verbosity_name, __func__) // ----------------------------------------------- // ABORT_F macro. Usage: ABORT_F("Cause of error: %s", error_str); // Message is optional #define ABORT_F(...) \ loguru::log_and_abort(0, "ABORT: ", __FILE__, __LINE__, __VA_ARGS__) // -------------------------------------------------------------------- // CHECK_F macros: #define CHECK_WITH_INFO_F(test, info, ...) \ LOGURU_PREDICT_TRUE((test) == true) \ ? (void)0 \ : loguru::log_and_abort(0, "CHECK FAILED: " info " ", __FILE__, __LINE__, \ ##__VA_ARGS__) /* Checked at runtime too. Will print error, then call fatal_handler (if any), then 'abort'. Note that the test must be boolean. CHECK_F(ptr); will not compile, but CHECK_F(ptr != nullptr); will. */ #define CHECK_F(test, ...) CHECK_WITH_INFO_F(test, #test, ##__VA_ARGS__) #define CHECK_NOTNULL_F(x, ...) \ CHECK_WITH_INFO_F((x) != nullptr, #x " != nullptr", ##__VA_ARGS__) #define CHECK_OP_F(expr_left, expr_right, op, ...) \ do \ { \ auto val_left = expr_left; \ auto val_right = expr_right; \ if (!LOGURU_PREDICT_TRUE(val_left op val_right)) \ { \ auto str_left = loguru::format_value(val_left); \ auto str_right = loguru::format_value(val_right); \ auto fail_info = loguru::textprintf( \ "CHECK FAILED: " LOGURU_FMT(s) " " LOGURU_FMT(s) " " LOGURU_FMT( \ s) " (" LOGURU_FMT(s) " " LOGURU_FMT(s) " " LOGURU_FMT(s) ") " \ " ", \ #expr_left, #op, #expr_right, str_left.c_str(), #op, \ str_right.c_str()); \ auto user_msg = loguru::textprintf(__VA_ARGS__); \ loguru::log_and_abort(0, fail_info.c_str(), __FILE__, __LINE__, \ LOGURU_FMT(s), user_msg.c_str()); \ } \ } while (false) #ifndef LOGURU_DEBUG_LOGGING #ifndef NDEBUG #define LOGURU_DEBUG_LOGGING 1 #else #define LOGURU_DEBUG_LOGGING 0 #endif #endif #if LOGURU_DEBUG_LOGGING // Debug logging enabled: #define DLOG_F(verbosity_name, ...) LOG_F(verbosity_name, __VA_ARGS__) #define DVLOG_F(verbosity, ...) VLOG_F(verbosity, __VA_ARGS__) #define DLOG_IF_F(verbosity_name, ...) LOG_IF_F(verbosity_name, __VA_ARGS__) #define DVLOG_IF_F(verbosity, ...) VLOG_IF_F(verbosity, __VA_ARGS__) #define DRAW_LOG_F(verbosity_name, ...) RAW_LOG_F(verbosity_name, __VA_ARGS__) #define DRAW_VLOG_F(verbosity, ...) RAW_VLOG_F(verbosity, __VA_ARGS__) #else // Debug logging disabled: #define DLOG_F(verbosity_name, ...) #define DVLOG_F(verbosity, ...) #define DLOG_IF_F(verbosity_name, ...) #define DVLOG_IF_F(verbosity, ...) #define DRAW_LOG_F(verbosity_name, ...) #define DRAW_VLOG_F(verbosity, ...) #endif #define CHECK_EQ_F(a, b, ...) CHECK_OP_F(a, b, ==, ##__VA_ARGS__) #define CHECK_NE_F(a, b, ...) CHECK_OP_F(a, b, !=, ##__VA_ARGS__) #define CHECK_LT_F(a, b, ...) CHECK_OP_F(a, b, <, ##__VA_ARGS__) #define CHECK_GT_F(a, b, ...) CHECK_OP_F(a, b, >, ##__VA_ARGS__) #define CHECK_LE_F(a, b, ...) CHECK_OP_F(a, b, <=, ##__VA_ARGS__) #define CHECK_GE_F(a, b, ...) 
CHECK_OP_F(a, b, >=, ##__VA_ARGS__) #ifndef LOGURU_DEBUG_CHECKS #ifndef NDEBUG #define LOGURU_DEBUG_CHECKS 1 #else #define LOGURU_DEBUG_CHECKS 0 #endif #endif #if LOGURU_DEBUG_CHECKS // Debug checks enabled: #define DCHECK_F(test, ...) CHECK_F(test, ##__VA_ARGS__) #define DCHECK_NOTNULL_F(x, ...) CHECK_NOTNULL_F(x, ##__VA_ARGS__) #define DCHECK_EQ_F(a, b, ...) CHECK_EQ_F(a, b, ##__VA_ARGS__) #define DCHECK_NE_F(a, b, ...) CHECK_NE_F(a, b, ##__VA_ARGS__) #define DCHECK_LT_F(a, b, ...) CHECK_LT_F(a, b, ##__VA_ARGS__) #define DCHECK_LE_F(a, b, ...) CHECK_LE_F(a, b, ##__VA_ARGS__) #define DCHECK_GT_F(a, b, ...) CHECK_GT_F(a, b, ##__VA_ARGS__) #define DCHECK_GE_F(a, b, ...) CHECK_GE_F(a, b, ##__VA_ARGS__) #else // Debug checks disabled: #define DCHECK_F(test, ...) #define DCHECK_NOTNULL_F(x, ...) #define DCHECK_EQ_F(a, b, ...) #define DCHECK_NE_F(a, b, ...) #define DCHECK_LT_F(a, b, ...) #define DCHECK_LE_F(a, b, ...) #define DCHECK_GT_F(a, b, ...) #define DCHECK_GE_F(a, b, ...) #endif // NDEBUG #if LOGURU_REDEFINE_ASSERT #undef assert #ifndef NDEBUG // Debug: #define assert(test) CHECK_WITH_INFO_F(!!(test), #test) // HACK #else #define assert(test) #endif #endif // LOGURU_REDEFINE_ASSERT #endif // LOGURU_HAS_DECLARED_FORMAT_HEADER // ---------------------------------------------------------------------------- // .dP"Y8 888888 88""Yb 888888 db 8b d8 .dP"Y8 // `Ybo." 88 88__dP 88__ dPYb 88b d88 `Ybo." // o.`Y8b 88 88"Yb 88"" dP__Yb 88YbdP88 o.`Y8b // 8bodP' 88 88 Yb 888888 dP""""Yb 88 YY 88 8bodP' #if LOGURU_WITH_STREAMS #ifndef LOGURU_HAS_DECLARED_STREAMS_HEADER #define LOGURU_HAS_DECLARED_STREAMS_HEADER /* This file extends loguru to enable std::stream-style logging, a la Glog. It's an optional feature behind the LOGURU_WITH_STREAMS settings because including it everywhere will slow down compilation times. */ #include #include // Adds about 38 kLoC on clang. #include namespace loguru { // Like sprintf, but returns the formated text. LOGURU_EXPORT std::string strprintf(LOGURU_FORMAT_STRING_TYPE format, ...) LOGURU_PRINTF_LIKE(1, 2); // Like vsprintf, but returns the formated text. LOGURU_EXPORT std::string vstrprintf(LOGURU_FORMAT_STRING_TYPE format, va_list) LOGURU_PRINTF_LIKE(1, 0); class LOGURU_EXPORT StreamLogger { public: StreamLogger(Verbosity verbosity, const char* file, unsigned line) : _verbosity(verbosity), _file(file), _line(line) { } ~StreamLogger() noexcept(false); template StreamLogger& operator<<(const T& t) { _ss << t; return *this; } // std::endl and other iomanip:s. StreamLogger& operator<<(std::ostream& (*f)(std::ostream&)) { f(_ss); return *this; } private: Verbosity _verbosity; const char* _file; unsigned _line; std::ostringstream _ss; }; class LOGURU_EXPORT AbortLogger { public: AbortLogger(const char* expr, const char* file, unsigned line) : _expr(expr), _file(file), _line(line) { } LOGURU_NORETURN ~AbortLogger() noexcept(false); template AbortLogger& operator<<(const T& t) { _ss << t; return *this; } // std::endl and other iomanip:s. AbortLogger& operator<<(std::ostream& (*f)(std::ostream&)) { f(_ss); return *this; } private: const char* _expr; const char* _file; unsigned _line; std::ostringstream _ss; }; class LOGURU_EXPORT Voidify { public: Voidify() {} // This has to be an operator with a precedence lower than << but higher than // ?: void operator&(const StreamLogger&) {} void operator&(const AbortLogger&) {} }; /* Helper functions for CHECK_OP_S macro. 
GLOG trick: The (int, int) specialization works around the issue that the compiler will not instantiate the template version of the function on values of unnamed enum type. */ #define DEFINE_CHECK_OP_IMPL(name, op) \ template \ inline std::string* name(const char* expr, const T1& v1, const char* op_str, \ const T2& v2) \ { \ if (LOGURU_PREDICT_TRUE(v1 op v2)) \ { \ return NULL; \ } \ std::ostringstream ss; \ ss << "CHECK FAILED: " << expr << " (" << v1 << " " << op_str << " " \ << v2 << ") "; \ return new std::string(ss.str()); \ } \ inline std::string* name(const char* expr, int v1, const char* op_str, \ int v2) \ { \ return name(expr, v1, op_str, v2); \ } DEFINE_CHECK_OP_IMPL(check_EQ_impl, ==) DEFINE_CHECK_OP_IMPL(check_NE_impl, !=) DEFINE_CHECK_OP_IMPL(check_LE_impl, <=) DEFINE_CHECK_OP_IMPL(check_LT_impl, <) DEFINE_CHECK_OP_IMPL(check_GE_impl, >=) DEFINE_CHECK_OP_IMPL(check_GT_impl, >) #undef DEFINE_CHECK_OP_IMPL /* GLOG trick: Function is overloaded for integral types to allow static const integrals declared in classes and not defined to be used as arguments to CHECK* macros. */ template inline const T& referenceable_value(const T& t) { return t; } inline char referenceable_value(char t) { return t; } inline unsigned char referenceable_value(unsigned char t) { return t; } inline signed char referenceable_value(signed char t) { return t; } inline short referenceable_value(short t) { return t; } inline unsigned short referenceable_value(unsigned short t) { return t; } inline int referenceable_value(int t) { return t; } inline unsigned int referenceable_value(unsigned int t) { return t; } inline long referenceable_value(long t) { return t; } inline unsigned long referenceable_value(unsigned long t) { return t; } inline long long referenceable_value(long long t) { return t; } inline unsigned long long referenceable_value(unsigned long long t) { return t; } } // namespace loguru // ----------------------------------------------- // Logging macros: // usage: LOG_STREAM(INFO) << "Foo " << std::setprecision(10) << some_value; #define VLOG_IF_S(verbosity, cond) \ ((verbosity) > loguru::current_verbosity_cutoff() || (cond) == false) \ ? (void)0 \ : loguru::Voidify() \ & loguru::StreamLogger(verbosity, __FILE__, __LINE__) #define LOG_IF_S(verbosity_name, cond) \ VLOG_IF_S(loguru::Verbosity_##verbosity_name, cond) #define VLOG_S(verbosity) VLOG_IF_S(verbosity, true) #define LOG_S(verbosity_name) VLOG_S(loguru::Verbosity_##verbosity_name) // ----------------------------------------------- // ABORT_S macro. Usage: ABORT_S() << "Causo of error: " << details; #define ABORT_S() \ loguru::Voidify() & loguru::AbortLogger("ABORT: ", __FILE__, __LINE__) // ----------------------------------------------- // CHECK_S macros: #define CHECK_WITH_INFO_S(cond, info) \ LOGURU_PREDICT_TRUE((cond) == true) \ ? 
(void)0 \ : loguru::Voidify() \ & loguru::AbortLogger("CHECK FAILED: " info " ", __FILE__, \ __LINE__) #define CHECK_S(cond) CHECK_WITH_INFO_S(cond, #cond) #define CHECK_NOTNULL_S(x) CHECK_WITH_INFO_S((x) != nullptr, #x " != nullptr") #define CHECK_OP_S(function_name, expr1, op, expr2) \ while (auto error_string = loguru::function_name( \ #expr1 " " #op " " #expr2, loguru::referenceable_value(expr1), \ #op, loguru::referenceable_value(expr2))) \ loguru::AbortLogger(error_string->c_str(), __FILE__, __LINE__) #define CHECK_EQ_S(expr1, expr2) CHECK_OP_S(check_EQ_impl, expr1, ==, expr2) #define CHECK_NE_S(expr1, expr2) CHECK_OP_S(check_NE_impl, expr1, !=, expr2) #define CHECK_LE_S(expr1, expr2) CHECK_OP_S(check_LE_impl, expr1, <=, expr2) #define CHECK_LT_S(expr1, expr2) CHECK_OP_S(check_LT_impl, expr1, <, expr2) #define CHECK_GE_S(expr1, expr2) CHECK_OP_S(check_GE_impl, expr1, >=, expr2) #define CHECK_GT_S(expr1, expr2) CHECK_OP_S(check_GT_impl, expr1, >, expr2) #if LOGURU_DEBUG_LOGGING // Debug logging enabled: #define DVLOG_IF_S(verbosity, cond) VLOG_IF_S(verbosity, cond) #define DLOG_IF_S(verbosity_name, cond) LOG_IF_S(verbosity_name, cond) #define DVLOG_S(verbosity) VLOG_S(verbosity) #define DLOG_S(verbosity_name) LOG_S(verbosity_name) #else // Debug logging disabled: #define DVLOG_IF_S(verbosity, cond) \ (true || (verbosity) > loguru::current_verbosity_cutoff() \ || (cond) == false) \ ? (void)0 \ : loguru::Voidify() \ & loguru::StreamLogger(verbosity, __FILE__, __LINE__) #define DLOG_IF_S(verbosity_name, cond) \ DVLOG_IF_S(loguru::Verbosity_##verbosity_name, cond) #define DVLOG_S(verbosity) DVLOG_IF_S(verbosity, true) #define DLOG_S(verbosity_name) DVLOG_S(loguru::Verbosity_##verbosity_name) #endif #if LOGURU_DEBUG_CHECKS // Debug checks enabled: #define DCHECK_S(cond) CHECK_S(cond) #define DCHECK_NOTNULL_S(x) CHECK_NOTNULL_S(x) #define DCHECK_EQ_S(a, b) CHECK_EQ_S(a, b) #define DCHECK_NE_S(a, b) CHECK_NE_S(a, b) #define DCHECK_LT_S(a, b) CHECK_LT_S(a, b) #define DCHECK_LE_S(a, b) CHECK_LE_S(a, b) #define DCHECK_GT_S(a, b) CHECK_GT_S(a, b) #define DCHECK_GE_S(a, b) CHECK_GE_S(a, b) #else // Debug checks disabled: #define DCHECK_S(cond) CHECK_S(true || (cond)) #define DCHECK_NOTNULL_S(x) CHECK_S(true || (x) != nullptr) #define DCHECK_EQ_S(a, b) CHECK_S(true || (a) == (b)) #define DCHECK_NE_S(a, b) CHECK_S(true || (a) != (b)) #define DCHECK_LT_S(a, b) CHECK_S(true || (a) < (b)) #define DCHECK_LE_S(a, b) CHECK_S(true || (a) <= (b)) #define DCHECK_GT_S(a, b) CHECK_S(true || (a) > (b)) #define DCHECK_GE_S(a, b) CHECK_S(true || (a) >= (b)) #endif #if LOGURU_REPLACE_GLOG #undef LOG #undef VLOG #undef LOG_IF #undef VLOG_IF #undef CHECK #undef CHECK_NOTNULL #undef CHECK_EQ #undef CHECK_NE #undef CHECK_LT #undef CHECK_LE #undef CHECK_GT #undef CHECK_GE #undef DLOG #undef DVLOG #undef DLOG_IF #undef DVLOG_IF #undef DCHECK #undef DCHECK_NOTNULL #undef DCHECK_EQ #undef DCHECK_NE #undef DCHECK_LT #undef DCHECK_LE #undef DCHECK_GT #undef DCHECK_GE #undef VLOG_IS_ON #define LOG LOG_S #define VLOG VLOG_S #define LOG_IF LOG_IF_S #define VLOG_IF VLOG_IF_S #define CHECK(cond) CHECK_S(!!(cond)) #define CHECK_NOTNULL CHECK_NOTNULL_S #define CHECK_EQ CHECK_EQ_S #define CHECK_NE CHECK_NE_S #define CHECK_LT CHECK_LT_S #define CHECK_LE CHECK_LE_S #define CHECK_GT CHECK_GT_S #define CHECK_GE CHECK_GE_S #define DLOG DLOG_S #define DVLOG DVLOG_S #define DLOG_IF DLOG_IF_S #define DVLOG_IF DVLOG_IF_S #define DCHECK DCHECK_S #define DCHECK_NOTNULL DCHECK_NOTNULL_S #define DCHECK_EQ DCHECK_EQ_S #define DCHECK_NE 
DCHECK_NE_S #define DCHECK_LT DCHECK_LT_S #define DCHECK_LE DCHECK_LE_S #define DCHECK_GT DCHECK_GT_S #define DCHECK_GE DCHECK_GE_S #define VLOG_IS_ON(verbosity) \ ((verbosity) <= loguru::current_verbosity_cutoff()) #endif // LOGURU_REPLACE_GLOG #endif // LOGURU_WITH_STREAMS #endif // LOGURU_HAS_DECLARED_STREAMS_HEADER basix-0.3.0/cpp/basix/maps.cpp000066400000000000000000000015771411115224000161550ustar00rootroot00000000000000// Copyright (c) 2021 Matthew Scroggs // FEniCS Project // SPDX-License-Identifier: MIT #include "maps.h" #include #include //----------------------------------------------------------------------------- const std::string& basix::maps::type_to_str(maps::type type) { static const std::map type_to_name = {{maps::type::identity, "identity"}, {maps::type::covariantPiola, "covariant Piola"}, {maps::type::contravariantPiola, "contravariant Piola"}, {maps::type::doubleCovariantPiola, "double covariant Piola"}, {maps::type::doubleContravariantPiola, "double contravariant Piola"}}; auto it = type_to_name.find(type); if (it == type_to_name.end()) throw std::runtime_error("Can't find type"); return it->second; } //----------------------------------------------------------------------------- basix-0.3.0/cpp/basix/maps.h000066400000000000000000000103131411115224000156060ustar00rootroot00000000000000// Copyright (c) 2021 Matthew Scroggs // FEniCS Project // SPDX-License-Identifier: MIT #pragma once #include #include #include #include #include #include /// Information about finite element maps namespace basix::maps { /// Cell type enum class type { identity, covariantPiola, contravariantPiola, doubleCovariantPiola, doubleContravariantPiola, }; /// Convert mapping type enum to string const std::string& type_to_str(maps::type type); namespace impl { template void dot22(O& r, const Mat0& A, const Mat1& B, const Mat2& C) { assert(A.shape(1) == B.shape(0)); r = 0; for (std::size_t i = 0; i < r.shape(0); ++i) for (std::size_t j = 0; j < r.shape(1); ++j) for (std::size_t k = 0; k < A.shape(1); ++k) for (std::size_t l = 0; l < B.shape(1); ++l) r(i, j) += A(i, k) * B(k, l) * C(l, j); } template void dot21(Vec& r, const Mat0& A, const Mat1& B) { // assert(A.shape(1) == B.shape(0)); r = 0; for (std::size_t i = 0; i < r.shape(0); ++i) for (std::size_t k = 0; k < A.shape(1); ++k) r[i] += A(i, k) * B[k]; } template void identity(Vec0& r, const Vec1& U, const Mat0& /*J*/, double /*detJ*/, const Mat1& /*K*/) { r = U; } template void covariant_piola(O&& r, const P& U, const Q& /*J*/, double /*detJ*/, const R& K) { auto Kt = xt::transpose(K); for (std::size_t p = 0; p < U.shape(0); ++p) { auto r_p = xt::row(r, p); auto U_p = xt::row(U, p); dot21(r_p, Kt, U_p); } } template void contravariant_piola(O&& r, const P& U, const Q& J, double detJ, const R& /*K*/) { for (std::size_t p = 0; p < U.shape(0); ++p) { auto r_p = xt::row(r, p); auto U_p = xt::row(U, p); dot21(r_p, J, U_p); } r /= detJ; } template void double_covariant_piola(O& r, const P& U, const Q& J, double /*detJ*/, const R& K) { for (std::size_t p = 0; p < U.shape(0); ++p) { auto r_p = xt::row(r, p); auto U_p = xt::row(U, p); auto _U = xt::reshape_view(U_p, {J.shape(1), J.shape(1)}); auto _r = xt::reshape_view(r_p, {K.shape(1), K.shape(1)}); dot22(_r, xt::transpose(K), _U, K); } } template void double_contravariant_piola(O& r, const P& U, const Q& J, double detJ, const R& /*K*/) { auto Jt = xt::transpose(J); for (std::size_t p = 0; p < U.shape(0); ++p) { auto r_p = xt::row(r, p); auto U_p = xt::row(U, p); auto _U = xt::reshape_view(U_p, 
{J.shape(1), J.shape(1)}); auto _r = xt::reshape_view(r_p, {J.shape(0), J.shape(0)}); dot22(_r, J, _U, Jt); } r /= (detJ * detJ); } } // namespace impl /// Apply a map to data. Note that the required input arguments depends /// on the type of map. /// /// @param[out] u The field after mapping, flattened with row-major /// layout /// @param[in] U The field to be mapped, flattened with row-major layout /// @param[in] J Jacobian of the map /// @param[in] detJ Determinant of `J` /// @param[in] K The inverse of `J` /// @param[in] map_type The map type template void apply_map(O&& u, const P& U, const Mat0& J, double detJ, const Mat1& K, maps::type map_type) { switch (map_type) { case maps::type::identity: return impl::identity(u, U, J, detJ, K); case maps::type::covariantPiola: return impl::covariant_piola(u, U, J, detJ, K); case maps::type::contravariantPiola: return impl::contravariant_piola(u, U, J, detJ, K); case maps::type::doubleCovariantPiola: return impl::double_covariant_piola(u, U, J, detJ, K); case maps::type::doubleContravariantPiola: return impl::double_contravariant_piola(u, U, J, detJ, K); default: throw std::runtime_error("Mapping not yet implemented"); } } } // namespace basix::maps basix-0.3.0/cpp/basix/moments.cpp000066400000000000000000000473261411115224000167010ustar00rootroot00000000000000// Copyright (c) 2020 Chris Richardson & Matthew Scroggs // FEniCS Project // SPDX-License-Identifier: MIT #include "moments.h" #include "cell.h" #include "finite-element.h" #include "polyset.h" #include "quadrature.h" #include #include #include #include #include #include #include using namespace basix; namespace { //---------------------------------------------------------------------------- std::vector axis_points(const cell::type celltype) { switch (celltype) { case cell::type::interval: return {1}; case cell::type::triangle: return {1, 2}; case cell::type::quadrilateral: return {1, 2}; case cell::type::tetrahedron: return {1, 2, 3}; case cell::type::hexahedron: return {1, 2, 4}; default: throw std::runtime_error( "Integrals of this entity type not yet implemented."); } } //---------------------------------------------------------------------------- // Map points defined on a cell entity into the full cell space // @param[in] celltype0 Parent cell type // @param[in] celltype1 Sub-entity of `celltype0` type // @param[in] x Coordinates defined on an entity of type `celltype1` // @return (0) Coordinates of points in the full space of `celltype1` // (the shape is (num_entities, num points per entity, tdim of // celltype0) and (1) local axes on each entity (num_entities, // entity_dim, tdim). 
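//
// Example (illustrative, assuming the usual Basix reference triangle with
// vertices (0,0), (1,0), (0,1), whose edge 0 connects vertices 1 and 2):
// mapping the 1D point x = 0.5 onto edge 0 of the triangle gives
//   p = x0 + x * (v2 - v1) = (1,0) + 0.5 * (-1,1) = (0.5, 0.5),
// and the local axis returned for that edge is (-1, 1).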
template std::pair>, xt::xtensor> map_points(const cell::type celltype0, const cell::type celltype1, const P& x) { assert(x.dimension() == 2); const std::size_t tdim = cell::topological_dimension(celltype0); std::size_t entity_dim = cell::topological_dimension(celltype1); std::size_t num_entities = cell::num_sub_entities(celltype0, entity_dim); // xt::xtensor p({num_entities, x.shape(0), tdim}); std::vector> p(num_entities, xt::zeros({x.shape(0), tdim})); xt::xtensor axes({num_entities, entity_dim, tdim}); xt::xtensor axes_e({entity_dim, tdim}); const std::vector axis_pts = axis_points(celltype0); for (std::size_t e = 0; e < num_entities; ++e) { // Get entity geometry xt::xtensor entity_x = cell::sub_entity_geometry(celltype0, entity_dim, e); auto x0 = xt::row(entity_x, 0); // Axes on the cell entity for (std::size_t i = 0; i < entity_dim; ++i) xt::view(axes, e, i, xt::all()) = xt::row(entity_x, axis_pts[i]) - x0; // Compute x = x0 + \Delta x p[e] = xt::tile(xt::view(entity_x, xt::newaxis(), 0), x.shape(0)); axes_e = xt::view(axes, e, xt::all(), xt::all()); p[e] += xt::linalg::dot(x, axes_e); } return {p, axes}; } //---------------------------------------------------------------------------- } // namespace //----------------------------------------------------------------------------- xt::xtensor moments::create_dot_moment_dof_transformations( const FiniteElement& moment_space) { // This function can be dramatically simplified and made // understandable by using tensors to give more logic to the objects cell::type celltype = moment_space.cell_type(); if (celltype == cell::type::point) return {}; xt::xarray pts = moment_space.points(); if (pts.shape(1) == 1) pts.reshape({pts.shape(0)}); const xt::xtensor& P = moment_space.interpolation_matrix(); xt::xtensor tpts; xt::xtensor J, K; switch (celltype) { case cell::type::interval: { tpts = xt::atleast_3d(1.0 - pts); J = {{{-1.0}}}; K = {{{-1.0}}}; break; } case cell::type::triangle: { std::array shape = {2, pts.shape(0), pts.shape(1)}; tpts = xt::zeros(shape); J.resize({2, 2, 2}); K.resize({2, 2, 2}); xt::xtensor A = xt::zeros({2, 2}); for (std::size_t i = 0; i < pts.shape(0); ++i) { tpts(0, i, 0) = pts(i, 1); tpts(0, i, 1) = 1 - pts(i, 0) - pts(i, 1); tpts(1, i, 0) = pts(i, 1); tpts(1, i, 1) = pts(i, 0); } A = {{0, 1}, {-1, -1}}; xt::view(J, 0, xt::all(), xt::all()) = A; A = {{-1, -1}, {1, 0}}; xt::view(K, 0, xt::all(), xt::all()) = A; A = {{0, 1}, {1, 0}}; xt::view(J, 1, xt::all(), xt::all()) = A; A = {{0, 1}, {1, 0}}; xt::view(K, 1, xt::all(), xt::all()) = A; break; } case cell::type::quadrilateral: { std::array shape0 = {2, pts.shape(0), pts.shape(1)}; tpts = xt::zeros(shape0); J.resize({2, 2, 2}); K.resize({2, 2, 2}); xt::xtensor A = xt::zeros({2, 2}); for (std::size_t i = 0; i < pts.shape(0); ++i) { tpts(0, i, 0) = pts(i, 1); tpts(0, i, 1) = 1.0 - pts(i, 0); tpts(1, i, 0) = pts(i, 1); tpts(1, i, 1) = pts(i, 0); } A = {{0, 1}, {-1, 0}}; xt::view(J, 0, xt::all(), xt::all()) = A; A = {{0, -1}, {1, 0}}; xt::view(K, 0, xt::all(), xt::all()) = A; A = {{0, 1}, {1, 0}}; xt::view(J, 1, xt::all(), xt::all()) = A; A = {{0, 1}, {1, 0}}; xt::view(K, 1, xt::all(), xt::all()) = A; break; } default: { throw std::runtime_error( "DOF transformations only implemented for tdim <= 2."); } } std::array shape = {tpts.shape(0), (std::size_t)moment_space.dim(), (std::size_t)moment_space.dim()}; xt::xtensor out = xt::zeros(shape); for (std::size_t i = 0; i < tpts.shape(0); ++i) { auto _tpoint = xt::view(tpts, i, xt::all(), xt::all()); xt::xtensor moment_space_pts 
= xt::view( moment_space.tabulate(0, _tpoint), 0, xt::all(), xt::all(), xt::all()); // Tile the J and J^-1 for passing into the mapping function. This // could be avoided with some changes to calls to map functions // taking just one J and J^1 auto Ji = xt::tile(xt::view(J, i, xt::newaxis(), xt::all(), xt::all()), moment_space_pts.shape(0)); auto Ki = xt::tile(xt::view(K, i, xt::newaxis(), xt::all(), xt::all()), moment_space_pts.shape(0)); std::vector detJ(Ji.shape(0), 1.0); // Pull back basis function values to the reference cell (applied // map) const xt::xtensor F = moment_space.map_pull_back(moment_space_pts, Ji, detJ, Ki); // Copy onto 2D array xt::xtensor _pulled({F.shape(0), F.shape(1) * F.shape(2)}); for (std::size_t p = 0; p < F.shape(0); ++p) { { for (std::size_t i = 0; i < F.shape(1); ++i) for (std::size_t j = 0; j < F.shape(2); ++j) _pulled(p, j * F.shape(1) + i) = F(p, i, j); } } // Apply interpolation matrix to transformed basis function values xt::xtensor Pview, phi_transformed; for (int v = 0; v < moment_space.value_size(); ++v) { Pview = xt::view( P, xt::range(0, P.shape(0)), xt::range(v * _pulled.shape(0), (v + 1) * _pulled.shape(0))); phi_transformed = xt::view( _pulled, xt::range(0, _pulled.shape(0)), xt::range(moment_space.dim() * v, moment_space.dim() * (v + 1))); xt::view(out, i, xt::all(), xt::all()) += xt::linalg::dot(Pview, phi_transformed); } } return out; } //---------------------------------------------------------------------------- xt::xtensor moments::create_moment_dof_transformations(const FiniteElement& moment_space) { const xt::xtensor t = create_dot_moment_dof_transformations(moment_space); xt::xtensor rot = xt::zeros({2, 2}); xt::xtensor ref = xt::zeros({2, 2}); cell::type celltype = moment_space.cell_type(); switch (celltype) { case cell::type::interval: return t; case cell::type::triangle: rot = {{-1, -1}, {1, 0}}; ref = {{0, 1}, {1, 0}}; break; case cell::type::quadrilateral: // TODO: check that these are correct rot = {{0, -1}, {1, 0}}; ref = {{0, 1}, {1, 0}}; break; default: throw std::runtime_error("Unexpected cell type"); } const std::size_t scalar_dofs = t.shape(1); xt::xtensor M({2, 2 * scalar_dofs, 2 * scalar_dofs}); for (std::size_t i = 0; i < scalar_dofs; ++i) { for (std::size_t j = 0; j < scalar_dofs; ++j) { xt::view(M, 0, xt::range(2 * i, 2 * i + 2), xt::range(2 * j, 2 * j + 2)) = t(0, i, j) * rot; } } for (std::size_t i = 0; i < scalar_dofs; ++i) { for (std::size_t j = 0; j < scalar_dofs; ++j) { xt::view(M, 1, xt::range(2 * i, 2 * i + 2), xt::range(2 * j, 2 * j + 2)) = t(1, i, j) * ref; } } return M; } //---------------------------------------------------------------------------- xt::xtensor moments::create_normal_moment_dof_transformations( const FiniteElement& moment_space) { xt::xtensor t = create_dot_moment_dof_transformations(moment_space); const int tdim = cell::topological_dimension(moment_space.cell_type()); if (tdim == 1 or tdim == 2) xt::view(t, tdim - 1, xt::all(), xt::all()) *= -1.0; return t; } //---------------------------------------------------------------------------- xt::xtensor moments::create_tangent_moment_dof_transformations( const FiniteElement& moment_space) { const int tdim = cell::topological_dimension(moment_space.cell_type()); if (tdim != 1) throw std::runtime_error("Tangent is only well-defined on an edge."); xt::xtensor t = create_dot_moment_dof_transformations(moment_space); xt::view(t, 0, xt::all(), xt::all()) *= -1.0; return t; } 
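//-----------------------------------------------------------------------------
// Illustrative usage sketch (kept as a comment; not part of the library).
// Names are taken from this file and from lagrange.h; the return types are
// assumptions based on the declarations in moments.h, and the argument
// values are examples only.
//
//   // P1 moments on the edges of a triangle
//   FiniteElement space = create_dlagrange(cell::type::interval, 1);
//   auto [x, M] = moments::make_integral_moments(
//       space, cell::type::triangle, /*value_size=*/1, /*q_deg=*/2);
//   assert(x.size() == 3 and M.size() == 3); // one block per edge
//
//   // DOF transformation applied when an edge is reversed
//   xt::xtensor<double, 3> T
//       = moments::create_dot_moment_dof_transformations(space);
//   assert(T.shape(0) == 1); // a single reversal for an interval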
//---------------------------------------------------------------------------- std::pair>, std::vector>> moments::make_integral_moments(const FiniteElement& V, cell::type celltype, std::size_t value_size, int q_deg) { const cell::type sub_celltype = V.cell_type(); const std::size_t entity_dim = cell::topological_dimension(sub_celltype); if (entity_dim == 0) throw std::runtime_error("Cannot integrate over a dimension 0 entity."); const std::size_t num_entities = cell::num_sub_entities(celltype, entity_dim); // Get the quadrature points and weights auto [pts, _wts] = quadrature::make_quadrature("default", sub_celltype, q_deg); auto wts = xt::adapt(_wts); if (pts.dimension() == 1) pts = pts.reshape({pts.shape(0), 1}); // Evaluate moment space at quadrature points assert(V.value_size() == 1); const xt::xtensor phi = xt::view(V.tabulate(0, pts), 0, xt::all(), xt::all(), 0); // Pad out \phi moment is against a vector-valued function std::size_t vdim = value_size == 1 ? 1 : entity_dim; // Storage for the interpolation matrix const std::size_t num_dofs = vdim * phi.shape(1); const std::array shape = {num_dofs, value_size, pts.shape(0)}; std::vector> D(num_entities, xt::zeros(shape)); // Map quadrature points onto facet (cell entity e) const auto [points, axes] = map_points(celltype, sub_celltype, pts); // Compute entity integral moments // Iterate over cell entities if (value_size == 1) { for (std::size_t e = 0; e < num_entities; ++e) { for (std::size_t i = 0; i < phi.shape(1); ++i) { auto phi_i = xt::col(phi, i); xt::view(D[e], i, 0, xt::all()) = phi_i * wts; } } } else { for (std::size_t e = 0; e < num_entities; ++e) { // Loop over each 'dof' on an entity (moment basis function index) for (std::size_t i = 0; i < phi.shape(1); ++i) { auto phi_i = xt::col(phi, i); // TODO: Pad-out phi and call a updated // make_dot_integral_moments // FIXME: This assumed that the moment space has a certain // mapping type for (std::size_t d = 0; d < entity_dim; ++d) { // TODO: check that dof index is correct const std::size_t dof = i * entity_dim + d; for (std::size_t k = 0; k < value_size; ++k) xt::view(D[e], dof, k, xt::all()) = phi_i * wts * axes(e, d, k); } } } } return {points, D}; } //---------------------------------------------------------------------------- std::pair>, std::vector>> moments::make_dot_integral_moments(const FiniteElement& V, cell::type celltype, std::size_t value_size, int q_deg) { const cell::type sub_celltype = V.cell_type(); const std::size_t entity_dim = cell::topological_dimension(sub_celltype); const std::size_t num_entities = cell::num_sub_entities(celltype, entity_dim); const std::size_t tdim = cell::topological_dimension(celltype); auto [pts, _wts] = quadrature::make_quadrature("default", sub_celltype, q_deg); auto wts = xt::adapt(_wts); // If this is always true, value_size input can be removed assert(tdim == value_size); // Evaluate moment space at quadrature points xt::xtensor phi = xt::view(V.tabulate(0, pts), 0, xt::all(), xt::all(), xt::all()); assert(phi.shape(2) == entity_dim); // Note: // Number of quadrature points per entity: phi.shape(0) // Dimension of the moment space on each entity: phi.shape(1) // Value size of the moment function: phi.shape(2) // Map quadrature points onto facet (cell entity e) auto [points, axes] = map_points(celltype, sub_celltype, pts); // Shape (num dofs, value size, num points) const std::array shape = {phi.shape(1), value_size, pts.shape(0)}; std::vector> D(num_entities, xt::zeros(shape)); // Compute entity integral moments // Iterate over 
cell entities for (std::size_t e = 0; e < num_entities; ++e) { // Loop over each 'dof' on an entity (moment basis function index) for (std::size_t dof = 0; dof < phi.shape(1); ++dof) { // Loop over value size of function to which moment function is // applied for (std::size_t j = 0; j < value_size; ++j) { // Loop over value topological dimension of cell entity (which // is equal to phi.shape(2)) for (std::size_t d = 0; d < phi.shape(2); ++d) { // Add quadrature point on cell entity contributions xt::view(D[e], dof, j, xt::all()) += wts * xt::view(phi, xt::all(), dof, d) * axes(e, d, j); } } } } return {points, D}; } //---------------------------------------------------------------------------- std::pair>, std::vector>> moments::make_tangent_integral_moments(const FiniteElement& V, cell::type celltype, std::size_t value_size, int q_deg) { const cell::type sub_celltype = V.cell_type(); const std::size_t entity_dim = cell::topological_dimension(sub_celltype); const std::size_t num_entities = cell::num_sub_entities(celltype, entity_dim); const std::size_t tdim = cell::topological_dimension(celltype); // If this is always true, value_size input can be removed assert(tdim == value_size); if (entity_dim != 1) throw std::runtime_error("Tangent is only well-defined on an edge."); auto [pts, _wts] = quadrature::make_quadrature("default", cell::type::interval, q_deg); auto wts = xt::adapt(_wts); // Evaluate moment space at quadrature points assert(V.value_size() == 1); xt::xtensor phi = xt::view(V.tabulate(0, pts), 0, xt::all(), xt::all(), 0); std::vector> points( num_entities, xt::zeros({pts.shape(0), tdim})); const std::array shape = {phi.shape(1), value_size, phi.shape(0)}; std::vector> D(num_entities, xt::zeros(shape)); // Iterate over cell entities for (std::size_t e = 0; e < num_entities; ++e) { xt::xtensor edge_x = cell::sub_entity_geometry(celltype, 1, e); auto X0 = xt::row(edge_x, 0); auto tangent = xt::row(edge_x, 1) - X0; // No need to normalise the tangent, as the size of this is equal to // the integral Jacobian // Map quadrature points onto triangle edge for (std::size_t i = 0; i < pts.shape(0); ++i) xt::view(points[e], i, xt::all()) = X0 + pts[i] * tangent; // Compute edge tangent integral moments for (std::size_t i = 0; i < phi.shape(1); ++i) { auto phi_i = xt::col(phi, i); for (std::size_t j = 0; j < value_size; ++j) xt::view(D[e], i, j, xt::all()) = phi_i * wts * tangent[j]; } } return {points, D}; } //---------------------------------------------------------------------------- std::pair>, std::vector>> moments::make_normal_integral_moments(const FiniteElement& V, cell::type celltype, std::size_t value_size, int q_deg) { const std::size_t tdim = cell::topological_dimension(celltype); assert(tdim == value_size); const cell::type sub_celltype = V.cell_type(); const std::size_t entity_dim = cell::topological_dimension(sub_celltype); const std::size_t num_entities = cell::num_sub_entities(celltype, entity_dim); if (static_cast(entity_dim) != static_cast(tdim) - 1) throw std::runtime_error("Normal is only well-defined on a facet."); // Compute quadrature points for evaluating integral auto [pts, _wts] = quadrature::make_quadrature("default", sub_celltype, q_deg); auto wts = xt::adapt(_wts); // Evaluate moment space at quadrature points assert(V.value_size() == 1); xt::xtensor phi = xt::view(V.tabulate(0, pts), 0, xt::all(), xt::all(), 0); // Storage for coordinates of evaluations points in the reference cell std::vector> points( num_entities, xt::zeros({pts.shape(0), tdim})); // Storage 
for interpolation matrix const std::array shape = {phi.shape(1), value_size, phi.shape(0)}; std::vector> D(num_entities, xt::zeros(shape)); // Evaluate moment space at quadrature points // Iterate over cell entities xt::xtensor normal; for (std::size_t e = 0; e < num_entities; ++e) { // Map quadrature points onto facet (cell entity e) xt::xtensor facet_x = cell::sub_entity_geometry(celltype, tdim - 1, e); auto x0 = xt::row(facet_x, 0); if (tdim == 2) { // No need to normalise the normal, as the size of this is equal // to the integral jacobian auto tangent = xt::row(facet_x, 1) - x0; normal = {-tangent(1), tangent(0)}; for (std::size_t p = 0; p < pts.shape(0); ++p) xt::view(points[e], p, xt::all()) = x0 + pts[p] * tangent; } else if (tdim == 3) { // No need to normalise the normal, as the size of this is equal // to the integral Jacobian auto t0 = xt::row(facet_x, 1) - x0; auto t1 = xt::row(facet_x, 2) - x0; normal = xt::linalg::cross(t0, t1); for (std::size_t p = 0; p < pts.shape(0); ++p) { xt::view(points[e], p, xt::all()) = x0 + pts(p, 0) * t0 + pts(p, 1) * t1; } } else throw std::runtime_error("Normal on this cell cannot be computed."); // Compute facet normal integral moments for (std::size_t i = 0; i < phi.shape(1); ++i) { auto phi_i = xt::col(phi, i); for (std::size_t j = 0; j < value_size; ++j) xt::view(D[e], i, j, xt::all()) = phi_i * wts * normal[j]; } } return {points, D}; } //---------------------------------------------------------------------------- basix-0.3.0/cpp/basix/moments.h000066400000000000000000000160121411115224000163320ustar00rootroot00000000000000// Copyright (c) 2020 Chris Richardson & Matthew Scroggs // FEniCS Project // SPDX-License-Identifier: MIT #pragma once #include "cell.h" #include namespace basix { class FiniteElement; /// ## Integral moments /// These functions generate dual set matrices for integral moments /// against spaces on a subentity of the cell namespace moments { /// Create the dof transformations for the DOFs defined using a dot /// integral moment. /// /// A dot integral moment is defined by /// \f[l_i(v) = \int v\cdot\phi_i,\f] /// where \f$\phi_i\f$ is a basis function in the moment space, and \f$v\f$ and /// \f$\phi_i\f$ are either both scalars or are vectors of the same size. /// /// If the moment space is an interval, this returns one matrix /// representing the reversal of the interval. If the moment space is a /// face, this returns two matrices: one representing a rotation, the /// other a reflection. /// /// These matrices are computed by calculation the interpolation /// coefficients of a rotated/reflected basis into the original basis. /// /// @param[in] moment_space The finite element space that the integral /// moment is taken against /// @return A list of dof transformations xt::xtensor create_dot_moment_dof_transformations(const FiniteElement& moment_space); /// Create the DOF transformations for the DOFs defined using an integral /// moment. /// /// An integral moment is defined by /// \f[l_{i,j}(v) = \int v\cdot e_j\phi_i,\f] /// where \f$\phi_i\f$ is a basis function in the moment space, \f$e_j\f$ is a /// coordinate direction (of the cell sub-entity the moment is taken on), /// \f$v\f$ is a vector, and \f$\phi_i\f$ is a scalar. /// /// This will combine multiple copies of the result of /// `create_dot_moment_dof_transformations` to give the transformations /// for integral moments of each vector component against the moment /// space. 
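///
/// A minimal usage sketch (illustrative; `create_dlagrange` is provided by
/// lagrange.h, and the 6x6 shape below follows from a 3-DOF moment space):
///
/// \code{.cpp}
/// // Degree-1 Lagrange moment space on a triangular face: 3 scalar DOFs
/// FiniteElement moment_space = create_dlagrange(cell::type::triangle, 1);
/// // One rotation and one reflection, each of shape (2*3) x (2*3)
/// xt::xtensor<double, 3> M
///     = moments::create_moment_dof_transformations(moment_space);
/// \endcode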
/// /// @param[in] moment_space The finite element space that the integral /// moment is taken against /// @return A list of dof transformations xt::xtensor create_moment_dof_transformations(const FiniteElement& moment_space); /// Create the dof transformations for the DOFs defined using a normal /// integral moment. /// /// A normal integral moment is defined by /// \f[l_{i,j}(v) = \int v\cdot n\phi_i,\f] /// where \f$\phi_i\f$ is a basis function in the moment space, \f$n\f$ is /// normal to the cell sub-entity, \f$v\f$ is a vector, and \f$\phi_i\f$ is a /// scalar. /// /// This does the same as `create_dot_moment_dof_transformations` with /// some additional factors of -1 to account for the changing of the /// normal direction when the entity is reflected. /// /// @param[in] moment_space The finite element space that the integral /// moment is taken against /// @return A list of dof transformations xt::xtensor create_normal_moment_dof_transformations(const FiniteElement& moment_space); /// Create the dof transformations for the DOFs defined using a /// tangential integral moment. /// /// A tangential integral moment is defined by /// \f[l_{i,j}(v) = \int v\cdot t\phi_i,\f] /// where \f$\phi_i\f$ is a basis function in the moment space, \f$t\f$ is /// tangential to the edge, \f$v\f$ is a vector, and \f$\phi_i\f$ is a scalar. /// /// This does the same as `create_dot_moment_dof_transformations` with /// some additional factors of -1 to account for the changing of the /// tangent direction when the edge is reflected. /// /// @param[in] moment_space The finite element space that the integral /// moment is taken against /// @return A list of dof transformations xt::xtensor create_tangent_moment_dof_transformations(const FiniteElement& moment_space); /// Make interpolation points and weights for simple integral moments /// /// These will represent the integral of each function in the moment /// space over each sub entity of the moment space's cell type in a cell /// with the given type. For example, if the input cell type is a /// triangle, and the moment space is a P1 space on an edge, this will /// perform two integrals for each of the 3 edges of the triangle. /// /// @param moment_space The space to compute the integral moments against /// @param celltype The cell type of the cell on which the space is /// being defined /// @param value_size The value size of the space being defined /// @param q_deg The quadrature degree used for the integrals std::pair>, std::vector>> make_integral_moments(const FiniteElement& moment_space, cell::type celltype, std::size_t value_size, int q_deg); /// Make interpolation points and weights for dot product integral /// moments /// /// These will represent the integral of each function in the moment /// space over each sub entity of the moment space's cell type in a cell /// with the given type. For example, if the input cell type is a /// triangle and the moment space is a P1 space on an edge, this will /// perform two integrals for each of the 3 edges of the triangle. /// /// @todo Clarify what happens value size of the moment space is less /// than `value_size`. 
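///
/// Typical call (a sketch mirroring the use of this function in
/// nce-rtc.cpp; the argument values are illustrative only):
///
/// \code{.cpp}
/// // Interior moments of a quadrilateral against a degree-1 RTC space
/// auto [x, M] = moments::make_dot_integral_moments(
///     create_rtc(cell::type::quadrilateral, 1), cell::type::quadrilateral,
///     /*value_size=*/2, /*q_deg=*/4);
/// \endcode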
/// /// @param V The space to compute the integral moments against /// @param celltype The cell type of the cell on which the space is being /// defined /// @param value_size The value size of the space being defined /// @param q_deg The quadrature degree used for the integrals std::pair>, std::vector>> make_dot_integral_moments(const FiniteElement& V, cell::type celltype, std::size_t value_size, int q_deg); /// Make interpolation points and weights for tangent integral moments /// /// These can only be used when the moment space is defined on edges of /// the cell /// /// @param V The space to compute the integral moments against /// @param celltype The cell type of the cell on which the space is /// being defined /// @param value_size The value size of the space being defined the /// space /// @param q_deg The quadrature degree used for the integrals std::pair>, std::vector>> make_tangent_integral_moments(const FiniteElement& V, cell::type celltype, std::size_t value_size, int q_deg); /// Compute interpolation points and weights for normal integral moments /// /// These can only be used when the moment space is defined on facets of /// the cell /// /// @param[in] V The space to compute the integral moments against /// @param[in] celltype The cell type of the cell on which the space is /// being defined /// @param[in] value_size The value size of the space being defined /// @param[in] q_deg The quadrature degree used for the integrals /// @return (interpolation points, interpolation matrix) std::pair>, std::vector>> make_normal_integral_moments(const FiniteElement& V, cell::type celltype, std::size_t value_size, int q_deg); } // namespace moments } // namespace basix basix-0.3.0/cpp/basix/nce-rtc.cpp000066400000000000000000000255321411115224000165450ustar00rootroot00000000000000// Copyright (c) 2020 Chris Richardson & Matthew Scroggs // FEniCS Project // SPDX-License-Identifier: MIT #include "nce-rtc.h" #include "element-families.h" #include "lagrange.h" #include "log.h" #include "maps.h" #include "moments.h" #include "polyset.h" #include "quadrature.h" #include #include #include #include #include #include using namespace basix; //---------------------------------------------------------------------------- FiniteElement basix::create_rtc(cell::type celltype, int degree) { if (celltype != cell::type::quadrilateral and celltype != cell::type::hexahedron) { throw std::runtime_error("Unsupported cell type"); } if (degree > 4) { // TODO: suggest alternative with non-uniform points once implemented LOG(WARNING) << "RTC spaces with high degree using equally spaced" << " points are unstable."; } const std::size_t tdim = cell::topological_dimension(celltype); const cell::type facettype = (tdim == 2) ? cell::type::interval : cell::type::quadrilateral; // Evaluate the expansion polynomials at the quadrature points auto [pts, _wts] = quadrature::make_quadrature("default", celltype, 2 * degree); auto Qwts = xt::adapt(_wts); xt::xtensor phi = xt::view( polyset::tabulate(celltype, degree, 0, pts), 0, xt::all(), xt::all()); // The number of order (degree) polynomials const std::size_t psize = phi.shape(1); const int facet_count = tdim == 2 ? 4 : 6; const int facet_dofs = polyset::dim(facettype, degree - 1); const int internal_dofs = tdim == 2 ? 
2 * degree * (degree - 1) : 3 * degree * degree * (degree - 1); const std::size_t ndofs = facet_count * facet_dofs + internal_dofs; // Create coefficients for order (degree-1) vector polynomials xt::xtensor wcoeffs = xt::zeros({ndofs, psize * tdim}); const int nv_interval = polyset::dim(cell::type::interval, degree); const int ns_interval = polyset::dim(cell::type::interval, degree - 1); int dof = 0; if (tdim == 2) { for (std::size_t d = 0; d < tdim; ++d) for (int i = 0; i < ns_interval; ++i) for (int j = 0; j < ns_interval; ++j) wcoeffs(dof++, psize * d + i * nv_interval + j) = 1; } else { for (std::size_t d = 0; d < tdim; ++d) for (int i = 0; i < ns_interval; ++i) for (int j = 0; j < ns_interval; ++j) for (int k = 0; k < ns_interval; ++k) wcoeffs(dof++, psize * d + i * nv_interval * nv_interval + j * nv_interval + k) = 1; } // Create coefficients for additional polynomials in the div space for (int i = 0; i < pow(degree, tdim - 1); ++i) { std::vector indices(tdim - 1); if (tdim == 2) indices[0] = i; else { indices[0] = i / degree; indices[1] = i % degree; } for (std::size_t d = 0; d < tdim; ++d) { int n = 0; xt::xtensor integrand = xt::pow(xt::col(pts, d), degree); for (std::size_t c = 0; c < tdim; ++c) { if (c != d) { integrand *= xt::pow(xt::col(pts, c), indices[n]); ++n; } } for (std::size_t k = 0; k < psize; ++k) { const double w_sum = xt::sum(Qwts * integrand * xt::col(phi, k))(); wcoeffs(dof, k + psize * d) = w_sum; } ++dof; } } // Quadrature degree int quad_deg = 2 * degree; std::array>, 4> M; std::array>, 4> x; FiniteElement moment_space = create_dlagrange(facettype, degree - 1); std::tie(x[tdim - 1], M[tdim - 1]) = moments::make_normal_integral_moments( moment_space, celltype, tdim, quad_deg); xt::xtensor facet_transforms = moments::create_normal_moment_dof_transformations(moment_space); // Add integral moments on interior if (degree > 1) { std::tie(x[tdim], M[tdim]) = moments::make_dot_integral_moments( create_nce(celltype, degree - 1), celltype, tdim, quad_deg); } const std::vector>> topology = cell::topology(celltype); std::map> entity_transformations; if (tdim == 2) { entity_transformations[cell::type::interval] = facet_transforms; } else if (tdim == 3) { entity_transformations[cell::type::interval] = xt::xtensor({1, 0, 0}); entity_transformations[cell::type::quadrilateral] = facet_transforms; } xt::xtensor coeffs = compute_expansion_coefficients( celltype, wcoeffs, {M[tdim - 1], M[tdim]}, {x[tdim - 1], x[tdim]}, degree); return FiniteElement(element::family::RT, celltype, degree, {tdim}, coeffs, entity_transformations, x, M, maps::type::contravariantPiola); } //----------------------------------------------------------------------------- FiniteElement basix::create_nce(cell::type celltype, int degree) { if (celltype != cell::type::quadrilateral and celltype != cell::type::hexahedron) throw std::runtime_error("Unsupported cell type"); if (degree > 4) { // TODO: suggest alternative with non-uniform points once implemented LOG(WARNING) << "NC spaces with high degree using equally spaced" << " points are unstable."; } const std::size_t tdim = cell::topological_dimension(celltype); // Evaluate the expansion polynomials at the quadrature points auto [pts, _wts] = quadrature::make_quadrature("default", celltype, 2 * degree); auto wts = xt::adapt(_wts); xt::xtensor phi = xt::view( polyset::tabulate(celltype, degree, 0, pts), 0, xt::all(), xt::all()); // The number of order (degree) polynomials const int psize = phi.shape(1); const int edge_count = tdim == 2 ? 
4 : 12; const int edge_dofs = polyset::dim(cell::type::interval, degree - 1); const int face_count = tdim == 2 ? 1 : 6; const int face_dofs = 2 * degree * (degree - 1); const int volume_count = tdim == 2 ? 0 : 1; const int volume_dofs = 3 * degree * (degree - 1) * (degree - 1); const std::size_t ndofs = edge_count * edge_dofs + face_count * face_dofs + volume_count * volume_dofs; // Create coefficients for order (degree-1) vector polynomials xt::xtensor wcoeffs = xt::zeros({ndofs, psize * tdim}); const int nv_interval = polyset::dim(cell::type::interval, degree); const int ns_interval = polyset::dim(cell::type::interval, degree - 1); int dof = 0; if (tdim == 2) { for (std::size_t d = 0; d < tdim; ++d) for (int i = 0; i < ns_interval; ++i) for (int j = 0; j < ns_interval; ++j) wcoeffs(dof++, psize * d + i * nv_interval + j) = 1; } else { for (std::size_t d = 0; d < tdim; ++d) for (int i = 0; i < ns_interval; ++i) for (int j = 0; j < ns_interval; ++j) for (int k = 0; k < ns_interval; ++k) wcoeffs(dof++, psize * d + i * nv_interval * nv_interval + j * nv_interval + k) = 1; } // Create coefficients for additional polynomials in the curl space xt::xtensor integrand; switch (tdim) { case 2: { for (int i = 0; i < degree; ++i) { for (std::size_t d = 0; d < tdim; ++d) { integrand = xt::col(pts, 1 - d); for (int j = 1; j < degree; ++j) integrand *= xt::col(pts, 1 - d); for (int j = 0; j < i; ++j) integrand *= xt::col(pts, d); for (int k = 0; k < psize; ++k) { const double w_sum = xt::sum(wts * integrand * xt::col(phi, k))(); wcoeffs(dof, k + psize * d) = w_sum; } ++dof; } } break; } default: for (int i = 0; i < degree; ++i) { for (int j = 0; j < degree + 1; ++j) { for (std::size_t c = 0; c < tdim; ++c) { for (std::size_t d = 0; d < tdim; ++d) { if (d != c) { const std::size_t e = (c == 0 || d == 0) ? ((c == 1 || d == 1) ? 
2 : 1) : 0; if (c < e and j == degree) continue; integrand = xt::col(pts, e); for (int k = 1; k < degree; ++k) integrand *= xt::col(pts, e); for (int k = 0; k < i; ++k) integrand *= xt::col(pts, d); for (int k = 0; k < j; ++k) integrand *= xt::col(pts, c); for (int k = 0; k < psize; ++k) { const double w_sum = xt::sum(wts * integrand * xt::col(phi, k))(); wcoeffs(dof, k + psize * d) = w_sum; } ++dof; } } } } } } // quadrature degree int quad_deg = 2 * degree; std::array>, 4> M; std::array>, 4> x; FiniteElement edge_moment_space = create_dlagrange(cell::type::interval, degree - 1); std::tie(x[1], M[1]) = moments::make_tangent_integral_moments( edge_moment_space, celltype, tdim, quad_deg); xt::xtensor edge_transforms = moments::create_tangent_moment_dof_transformations(edge_moment_space); // Add integral moments on interior xt::xtensor face_transforms; if (degree > 1) { // Face integral moment FiniteElement moment_space = create_rtc(cell::type::quadrilateral, degree - 1); std::tie(x[2], M[2]) = moments::make_dot_integral_moments( moment_space, celltype, tdim, quad_deg); if (tdim == 3) { face_transforms = moments::create_dot_moment_dof_transformations(moment_space); // Interior integral moment std::tie(x[3], M[3]) = moments::make_dot_integral_moments( create_rtc(cell::type::hexahedron, degree - 1), celltype, tdim, quad_deg); } } const std::vector>> topology = cell::topology(celltype); std::map> entity_transformations; entity_transformations[cell::type::interval] = edge_transforms; if (tdim == 3) { if (degree == 1) { entity_transformations[cell::type::quadrilateral] = xt::xtensor({2, 0, 0}); } else { entity_transformations[cell::type::quadrilateral] = face_transforms; } } xt::xtensor coeffs = compute_expansion_coefficients( celltype, wcoeffs, {M[1], M[2], M[3]}, {x[1], x[2], x[3]}, degree); return FiniteElement(element::family::N1E, celltype, degree, {tdim}, coeffs, entity_transformations, x, M, maps::type::covariantPiola); } //----------------------------------------------------------------------------- basix-0.3.0/cpp/basix/nce-rtc.h000066400000000000000000000007231411115224000162050ustar00rootroot00000000000000// Copyright (c) 2020 Chris Richardson & Matthew Scroggs // FEniCS Project // SPDX-License-Identifier: MIT #pragma once #include "cell.h" #include "finite-element.h" namespace basix { /// Create RTC H(div) element /// @param celltype /// @param degree FiniteElement create_rtc(cell::type celltype, int degree); /// Create NC H(curl) element /// @param celltype /// @param degree FiniteElement create_nce(cell::type celltype, int degree); } // namespace basix basix-0.3.0/cpp/basix/nedelec.cpp000066400000000000000000000337761411115224000166220ustar00rootroot00000000000000// Copyright (c) 2020 Chris Richardson & Matthew Scroggs // FEniCS Project // SPDX-License-Identifier: MIT #include "nedelec.h" #include "element-families.h" #include "lagrange.h" #include "maps.h" #include "moments.h" #include "polyset.h" #include "quadrature.h" #include "raviart-thomas.h" #include #include #include #include #include #include using namespace basix; namespace { //----------------------------------------------------------------------------- xt::xtensor create_nedelec_2d_space(int degree) { // Number of order (degree) vector polynomials const std::size_t nv = degree * (degree + 1) / 2; // Number of order (degree-1) vector polynomials const std::size_t ns0 = (degree - 1) * degree / 2; // Number of additional polynomials in Nedelec set const std::size_t ns = degree; // Tabulate polynomial set at quadrature 
points const auto [pts, _wts] = quadrature::make_quadrature( "default", cell::type::triangle, 2 * degree); const auto wts = xt::adapt(_wts); const xt::xtensor phi = xt::view(polyset::tabulate(cell::type::triangle, degree, 0, pts), 0, xt::all(), xt::all()); const std::size_t psize = phi.shape(1); // Create coefficients for order (degree-1) vector polynomials xt::xtensor wcoeffs = xt::zeros({nv * 2 + ns, psize * 2}); xt::view(wcoeffs, xt::range(0, nv), xt::range(0, nv)) = xt::eye(nv); xt::view(wcoeffs, xt::range(nv, 2 * nv), xt::range(psize, psize + nv)) = xt::eye(nv); // Create coefficients for the additional Nedelec polynomials for (std::size_t i = 0; i < ns; ++i) { auto p = xt::col(phi, ns0 + i); for (std::size_t k = 0; k < psize; ++k) { auto pk = xt::col(phi, k); wcoeffs(2 * nv + i, k) = xt::sum(wts * p * xt::col(pts, 1) * pk)(); wcoeffs(2 * nv + i, k + psize) = xt::sum(-wts * p * xt::col(pts, 0) * pk)(); } } return wcoeffs; } //----------------------------------------------------------------------------- std::pair>, 4>, std::array>, 4>> create_nedelec_2d_interpolation(int degree) { const int quad_deg = 5 * degree; std::array>, 4> x; std::array>, 4> M; // Integral representation for the boundary (edge) dofs std::tie(x[1], M[1]) = moments::make_tangent_integral_moments( create_dlagrange(cell::type::interval, degree - 1), cell::type::triangle, 2, quad_deg); if (degree > 1) { std::tie(x[2], M[2]) = moments::make_integral_moments( create_dlagrange(cell::type::triangle, degree - 2), cell::type::triangle, 2, quad_deg); } return {x, M}; } //----------------------------------------------------------------------------- std::map> create_nedelec_2d_entity_transforms(int degree) { std::map> entity_transformations; xt::xtensor edge_transforms = moments::create_tangent_moment_dof_transformations( create_dlagrange(cell::type::interval, degree - 1)); entity_transformations[cell::type::interval] = edge_transforms; return entity_transformations; } //----------------------------------------------------------------------------- xt::xtensor create_nedelec_3d_space(int degree) { // Reference tetrahedron const std::size_t tdim = 3; // Number of order (degree) vector polynomials const std::size_t nv = degree * (degree + 1) * (degree + 2) / 6; // Number of order (degree-1) vector polynomials const std::size_t ns0 = (degree - 1) * degree * (degree + 1) / 6; // Number of additional Nedelec polynomials that could be added const std::size_t ns = degree * (degree + 1) / 2; // Number of polynomials that would be included that are not // independent so are removed const std::size_t ns_remove = degree * (degree - 1) / 2; // Number of dofs in the space, ie size of polynomial set const std::size_t ndofs = 6 * degree + 4 * degree * (degree - 1) + (degree - 2) * (degree - 1) * degree / 2; // Tabulate polynomial basis at quadrature points const auto [pts, _wts] = quadrature::make_quadrature( "default", cell::type::tetrahedron, 2 * degree); const auto wts = xt::adapt(_wts); xt::xtensor phi = xt::view(polyset::tabulate(cell::type::tetrahedron, degree, 0, pts), 0, xt::all(), xt::all()); const std::size_t psize = phi.shape(1); // Create coefficients for order (degree-1) polynomials xt::xtensor wcoeffs = xt::zeros({ndofs, psize * tdim}); for (std::size_t i = 0; i < tdim; ++i) { auto range0 = xt::range(nv * i, nv * i + nv); auto range1 = xt::range(psize * i, psize * i + nv); xt::view(wcoeffs, range0, range1) = xt::eye(nv); } // Create coefficients for additional Nedelec polynomials auto p0 = xt::col(pts, 0); auto p1 = xt::col(pts, 
1); auto p2 = xt::col(pts, 2); for (std::size_t i = 0; i < ns; ++i) { auto p = xt::col(phi, ns0 + i); for (std::size_t k = 0; k < psize; ++k) { const double w = xt::sum(wts * p * p2 * xt::col(phi, k))(); // Don't include polynomials (*, *, 0) that are dependant if (i >= ns_remove) wcoeffs(tdim * nv + i - ns_remove, psize + k) = -w; wcoeffs(tdim * nv + i + ns - ns_remove, k) = w; } } for (std::size_t i = 0; i < ns; ++i) { auto p = xt::col(phi, ns0 + i); for (std::size_t k = 0; k < psize; ++k) { const double w = xt::sum(wts * p * p1 * xt::col(phi, k))(); wcoeffs(tdim * nv + i + ns * 2 - ns_remove, k) = -w; // Don't include polynomials (*, *, 0) that are dependant if (i >= ns_remove) wcoeffs(tdim * nv + i - ns_remove, psize * 2 + k) = w; } } for (std::size_t i = 0; i < ns; ++i) { auto p = xt::col(phi, ns0 + i); for (std::size_t k = 0; k < psize; ++k) { const double w = xt::sum(wts * p * p0 * xt::col(phi, k))(); wcoeffs(tdim * nv + i + ns - ns_remove, psize * 2 + k) = -w; wcoeffs(tdim * nv + i + ns * 2 - ns_remove, psize + k) = w; } } return wcoeffs; } //----------------------------------------------------------------------------- std::pair>, 4>, std::array>, 4>> create_nedelec_3d_interpolation(int degree) { // Number of dofs and interpolation points const int quad_deg = 5 * degree; std::array>, 4> x; std::array>, 4> M; std::tie(x[1], M[1]) = moments::make_tangent_integral_moments( create_dlagrange(cell::type::interval, degree - 1), cell::type::tetrahedron, 3, quad_deg); if (degree > 1) { std::tie(x[2], M[2]) = moments::make_integral_moments( create_dlagrange(cell::type::triangle, degree - 2), cell::type::tetrahedron, 3, quad_deg); } if (degree > 2) { std::tie(x[3], M[3]) = moments::make_integral_moments( create_dlagrange(cell::type::tetrahedron, degree - 3), cell::type::tetrahedron, 3, quad_deg); } return {x, M}; } //----------------------------------------------------------------------------- std::map> create_nedelec_3d_entity_transforms(int degree) { std::map> entity_transformations; const xt::xtensor edge_transforms = moments::create_tangent_moment_dof_transformations( create_dlagrange(cell::type::interval, degree - 1)); entity_transformations[cell::type::interval] = edge_transforms; // Faces if (degree > 1) { xt::xtensor face_transforms = moments::create_moment_dof_transformations( create_dlagrange(cell::type::triangle, degree - 2)); entity_transformations[cell::type::triangle] = face_transforms; } else { entity_transformations[cell::type::triangle] = xt::xtensor({2, 0, 0}); } return entity_transformations; } //----------------------------------------------------------------------------- std::pair>, 4>, std::array>, 4>> create_nedelec2_2d_interpolation(int degree) { const int quad_deg = 5 * degree; std::array>, 4> x; std::array>, 4> M; std::tie(x[1], M[1]) = moments::make_tangent_integral_moments( create_dlagrange(cell::type::interval, degree), cell::type::triangle, 2, quad_deg); if (degree > 1) { std::tie(x[2], M[2]) = moments::make_dot_integral_moments( create_rt(cell::type::triangle, degree - 1), cell::type::triangle, 2, quad_deg); } return {x, M}; } //----------------------------------------------------------------------------- std::map> create_nedelec2_2d_entity_transformations(int degree) { std::map> entity_transformations; xt::xtensor edge_transforms = moments::create_tangent_moment_dof_transformations( create_dlagrange(cell::type::interval, degree)); entity_transformations[cell::type::interval] = edge_transforms; return entity_transformations; } 
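//-----------------------------------------------------------------------------
// [Editorial sketch, not part of basix: the helper below exists only to
// illustrate the counting used in create_nedelec_2d_space earlier in this
// file, and its name is invented for this example.] That function builds the
// degree-d first-kind Nedelec space on a triangle from nv = d(d+1)/2 scalar
// polynomials per vector component plus ns = d additional "rotated"
// polynomials, so the space it spans has 2 * nv + ns = d(d+2) members. The
// arithmetic is spelled out and checked at compile time for the lowest
// degrees.
[[maybe_unused]] constexpr std::size_t
example_nedelec_2d_space_size(std::size_t degree)
{
  const std::size_t nv = degree * (degree + 1) / 2; // per-component polynomials
  const std::size_t ns = degree;                    // additional Nedelec polynomials
  return 2 * nv + ns;                               // = degree * (degree + 2)
}
static_assert(example_nedelec_2d_space_size(1) == 3
                  && example_nedelec_2d_space_size(2) == 8,
              "Counts disagree with the formulas in create_nedelec_2d_space");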
//----------------------------------------------------------------------------- std::pair>, 4>, std::array>, 4>> create_nedelec2_3d_interpolation(int degree) { // Create quadrature scheme on the edge const int quad_deg = 5 * degree; std::array>, 4> x; std::array>, 4> M; // Integral representation for the boundary (edge) dofs std::tie(x[1], M[1]) = moments::make_tangent_integral_moments( create_dlagrange(cell::type::interval, degree), cell::type::tetrahedron, 3, quad_deg); if (degree > 1) { // Integral moments on faces std::tie(x[2], M[2]) = moments::make_dot_integral_moments( create_rt(cell::type::triangle, degree - 1), cell::type::tetrahedron, 3, quad_deg); } if (degree > 2) { // Interior integral moment std::tie(x[3], M[3]) = moments::make_dot_integral_moments( create_rt(cell::type::tetrahedron, degree - 2), cell::type::tetrahedron, 3, quad_deg); } return {x, M}; } //----------------------------------------------------------------------------- std::map> create_nedelec2_3d_entity_transformations(int degree) { std::map> entity_transformations; const xt::xtensor edge_transforms = moments::create_tangent_moment_dof_transformations( create_dlagrange(cell::type::interval, degree)); entity_transformations[cell::type::interval] = edge_transforms; // Faces if (degree == 1) { entity_transformations[cell::type::triangle] = xt::xtensor({2, 0, 0}); } else { const xt::xtensor face_transforms = moments::create_dot_moment_dof_transformations( create_rt(cell::type::triangle, degree - 1)); entity_transformations[cell::type::triangle] = face_transforms; } return entity_transformations; } } // namespace //----------------------------------------------------------------------------- FiniteElement basix::create_nedelec(cell::type celltype, int degree) { std::array>, 4> M; std::array>, 4> x; xt::xtensor wcoeffs; std::map> transforms; switch (celltype) { case cell::type::triangle: { wcoeffs = create_nedelec_2d_space(degree); transforms = create_nedelec_2d_entity_transforms(degree); std::tie(x, M) = create_nedelec_2d_interpolation(degree); break; } case cell::type::tetrahedron: { wcoeffs = create_nedelec_3d_space(degree); transforms = create_nedelec_3d_entity_transforms(degree); std::tie(x, M) = create_nedelec_3d_interpolation(degree); break; } default: throw std::runtime_error("Invalid celltype in Nedelec"); } const std::size_t tdim = cell::topological_dimension(celltype); const xt::xtensor coeffs = compute_expansion_coefficients( celltype, wcoeffs, {M[1], M[2], M[3]}, {x[1], x[2], x[3]}, degree); return FiniteElement(element::family::N1E, celltype, degree, {tdim}, coeffs, transforms, x, M, maps::type::covariantPiola); } //----------------------------------------------------------------------------- FiniteElement basix::create_nedelec2(cell::type celltype, int degree) { std::array>, 4> M; std::array>, 4> x; std::map> entity_transformations; switch (celltype) { case cell::type::triangle: { std::tie(x, M) = create_nedelec2_2d_interpolation(degree); entity_transformations = create_nedelec2_2d_entity_transformations(degree); break; } case cell::type::tetrahedron: { std::tie(x, M) = create_nedelec2_3d_interpolation(degree); entity_transformations = create_nedelec2_3d_entity_transformations(degree); break; } default: throw std::runtime_error("Invalid celltype in Nedelec"); } const std::size_t tdim = cell::topological_dimension(celltype); const std::size_t psize = polyset::dim(celltype, degree); xt::xtensor wcoeffs = xt::eye(tdim * psize); const xt::xtensor coeffs = compute_expansion_coefficients( celltype, wcoeffs, 
{M[1], M[2], M[3]}, {x[1], x[2], x[3]}, degree); return FiniteElement(element::family::N2E, celltype, degree, {tdim}, coeffs, entity_transformations, x, M, maps::type::covariantPiola); } //----------------------------------------------------------------------------- basix-0.3.0/cpp/basix/nedelec.h000066400000000000000000000007151411115224000162520ustar00rootroot00000000000000// Copyright (c) 2020 Chris Richardson // FEniCS Project // SPDX-License-Identifier: MIT #pragma once #include "finite-element.h" namespace basix { /// Create Nedelec element (first kind) /// @param celltype /// @param degree FiniteElement create_nedelec(cell::type celltype, int degree); /// Create Nedelec element (second kind) /// @param celltype /// @param degree FiniteElement create_nedelec2(cell::type celltype, int degree); } // namespace basix basix-0.3.0/cpp/basix/polyset.cpp000066400000000000000000000605551411115224000167150ustar00rootroot00000000000000// Copyright (c) 2020 Chris Richardson // FEniCS Project // SPDX-License-Identifier: MIT #include "polyset.h" #include "cell.h" #include "indexing.h" #include #include #include #include using namespace basix; namespace { // Compute coefficients in the Jacobi Polynomial recurrence relation constexpr std::array jrc(int a, int n) { double an = (a + 2 * n + 1) * (a + 2 * n + 2) / static_cast(2 * (n + 1) * (a + n + 1)); double bn = a * a * (a + 2 * n + 1) / static_cast(2 * (n + 1) * (a + n + 1) * (a + 2 * n)); double cn = n * (a + n) * (a + 2 * n + 2) / static_cast((n + 1) * (a + n + 1) * (a + 2 * n)); return {an, bn, cn}; } //----------------------------------------------------------------------------- // Compute the complete set of derivatives from 0 to nderiv, for all the // polynomials up to order n on a line segment. The polynomials used are // Legendre Polynomials, with the recurrence relation given by // n P(n) = (2n - 1) x P_{n-1} - (n - 1) P_{n-2} in the interval [-1, 1]. The // range is rescaled here to [0, 1]. xt::xtensor tabulate_polyset_line_derivs(std::size_t degree, std::size_t nderiv, const xt::xtensor& x) { assert(x.shape(0) > 0); const auto X = x * 2.0 - 1.0; const std::size_t m = (degree + 1); xt::xtensor P({nderiv + 1, x.shape(0), m}); for (std::size_t k = 0; k <= nderiv; ++k) { // Get reference to this derivative auto result = xt::view(P, k, xt::all(), xt::all()); if (k == 0) xt::col(result, 0) = 1.0; else xt::col(result, 0) = 0.0; auto result0 = xt::view(P, k - 1, xt::all(), xt::all()); for (std::size_t p = 1; p <= degree; ++p) { const double a = 1.0 - 1.0 / static_cast(p); xt::col(result, p) = X * xt::col(result, p - 1) * (a + 1.0); if (k > 0) xt::col(result, p) += 2 * k * xt::col(result0, p - 1) * (a + 1.0); if (p > 1) xt::col(result, p) -= xt::col(result, p - 2) * a; } } // Normalise for (std::size_t k = 0; k < nderiv + 1; ++k) for (std::size_t p = 0; p <= degree; ++p) xt::view(P, k, xt::all(), p) *= std::sqrt(p + 0.5); return P; } //----------------------------------------------------------------------------- // Compute the complete set of derivatives from 0 to nderiv, for all the // polynomials up to order n on a triangle in [0, 1][0, 1]. The // polynomials P_{pq} are built up in sequence, firstly along q = 0, // which is a line segment, as in tabulate_polyset_interval_derivs // above, but with a change of variables. The polynomials are then // extended in the q direction, using the relation given in Sherwin and // Karniadakis 1995 (https://doi.org/10.1016/0045-7825(94)00745-9). 
xt::xtensor tabulate_polyset_triangle_derivs(int n, int nderiv, const xt::xtensor& pts) { assert(pts.shape(1) == 2); const auto x = pts * 2.0 - 1.0; auto x0 = xt::col(x, 0); auto x1 = xt::col(x, 1); const std::size_t m = (n + 1) * (n + 2) / 2; const std::size_t md = (nderiv + 1) * (nderiv + 2) / 2; xt::xtensor P({md, pts.shape(0), m}); // f3 = ((1 - y) / 2)^2 const auto f3 = xt::square(1.0 - x1) * 0.25; // Iterate over derivatives in increasing order, since higher derivatives // Depend on earlier calculations xt::xtensor result({pts.shape(0), m}); for (int k = 0; k <= nderiv; ++k) { for (int kx = 0; kx <= k; ++kx) { const int ky = k - kx; if (kx == 0 and ky == 0) xt::col(result, 0) = 1.0; else xt::col(result, 0) = 0.0; for (int p = 1; p < n + 1; ++p) { auto p0 = xt::col(result, idx(p, 0)); const double a = static_cast(2 * p - 1) / static_cast(p); p0 = (x0 + 0.5 * x1 + 0.5) * xt::col(result, idx(p - 1, 0)) * a; if (kx > 0) { auto result0 = xt::view(P, idx(kx - 1, ky), xt::all(), idx(p - 1, 0)); p0 += 2 * kx * a * result0; } if (ky > 0) { auto result0 = xt::view(P, idx(kx, ky - 1), xt::all(), idx(p - 1, 0)); p0 += ky * a * result0; } if (p > 1) { // y^2 terms p0 -= f3 * xt::col(result, idx(p - 2, 0)) * (a - 1.0); if (ky > 0) { auto result0 = xt::view(P, idx(kx, ky - 1), xt::all(), idx(p - 2, 0)); p0 -= ky * (x1 - 1.0) * result0 * (a - 1.0); } if (ky > 1) { auto result0 = xt::view(P, idx(kx, ky - 2), xt::all(), idx(p - 2, 0)); p0 -= ky * (ky - 1) * result0 * (a - 1.0); } } } for (int p = 0; p < n; ++p) { auto p0 = xt::col(result, idx(p, 0)); auto p1 = xt::col(result, idx(p, 1)); p1 = p0 * (x1 * (1.5 + p) + 0.5 + p); if (ky > 0) { auto result0 = xt::view(P, idx(kx, ky - 1), xt::all(), idx(p, 0)); p1 += 2 * ky * (1.5 + p) * result0; } for (int q = 1; q < n - p; ++q) { const auto [a1, a2, a3] = jrc(2 * p + 1, q); xt::col(result, idx(p, q + 1)) = xt::col(result, idx(p, q)) * (x1 * a1 + a2) - xt::col(result, idx(p, q - 1)) * a3; if (ky > 0) { auto result0 = xt::view(P, idx(kx, ky - 1), xt::all(), idx(p, q)); xt::col(result, idx(p, q + 1)) += 2 * ky * a1 * result0; } } } // Store xt::view(P, idx(kx, ky), xt::all(), xt::all()) = result; } } // Normalisation for (std::size_t j = 0; j < P.shape(0); ++j) { auto Pj = xt::view(P, j, xt::all(), xt::all()); for (int p = 0; p <= n; ++p) for (int q = 0; q <= n - p; ++q) xt::col(Pj, idx(p, q)) *= std::sqrt((p + 0.5) * (p + q + 1)); } return P; } //----------------------------------------------------------------------------- xt::xtensor tabulate_polyset_tetrahedron_derivs(int n, std::size_t nderiv, const xt::xtensor& pts) { assert(pts.shape(1) == 3); const std::size_t m = (n + 1) * (n + 2) * (n + 3) / 6; const std::size_t md = (nderiv + 1) * (nderiv + 2) * (nderiv + 3) / 6; auto x = pts * 2.0 - 1.0; const auto x0 = xt::col(x, 0); const auto x1 = xt::col(x, 1); const auto x2 = xt::col(x, 2); auto f2 = 0.25 * xt::square(x1 + x2); auto f3 = 0.5 * (1.0 + x1 * 2.0 + x2); auto f4 = 0.5 * (1.0 - x2); auto f5 = f4 * f4; // Traverse derivatives in increasing order xt::xtensor P({md, pts.shape(0), m}); xt::xtensor result({pts.shape(0), m}); for (std::size_t k = 0; k <= nderiv; ++k) { for (std::size_t j = 0; j <= k; ++j) { for (std::size_t kx = 0; kx <= j; ++kx) { const std::size_t ky = j - kx; const std::size_t kz = k - j; if (kx == 0 and ky == 0 and kz == 0) xt::col(result, 0) = 1.0; else xt::col(result, 0) = 0.0; for (int p = 1; p <= n; ++p) { auto p00 = xt::col(result, idx(p, 0, 0)); double a = static_cast(2 * p - 1) / static_cast(p); p00 = (x0 + 0.5 * (x1 + x2) + 
1.0) * xt::col(result, idx(p - 1, 0, 0)) * a; if (kx > 0) { p00 += 2 * kx * a * xt::view(P, idx(kx - 1, ky, kz), xt::all(), idx(p - 1, 0, 0)); } if (ky > 0) { p00 += ky * a * xt::view(P, idx(kx, ky - 1, kz), xt::all(), idx(p - 1, 0, 0)); } if (kz > 0) { p00 += kz * a * xt::view(P, idx(kx, ky, kz - 1), xt::all(), idx(p - 1, 0, 0)); } if (p > 1) { p00 -= f2 * xt::col(result, idx(p - 2, 0, 0)) * (a - 1.0); if (ky > 0) { p00 -= ky * (x1 + x2) * xt::view(P, idx(kx, ky - 1, kz), xt::all(), idx(p - 2, 0, 0)) * (a - 1.0); } if (ky > 1) { p00 -= ky * (ky - 1) * xt::view(P, idx(kx, ky - 2, kz), xt::all(), idx(p - 2, 0, 0)) * (a - 1.0); } if (kz > 0) { p00 -= kz * (x1 + x2) * xt::view(P, idx(kx, ky, kz - 1), xt::all(), idx(p - 2, 0, 0)) * (a - 1.0); } if (kz > 1) { p00 -= kz * (kz - 1) * xt::view(P, idx(kx, ky, kz - 2), xt::all(), idx(p - 2, 0, 0)) * (a - 1.0); } if (ky > 0 and kz > 0) { p00 -= 2.0 * ky * kz * xt::view(P, idx(kx, ky - 1, kz - 1), xt::all(), idx(p - 2, 0, 0)) * (a - 1.0); } } } for (int p = 0; p < n; ++p) { auto p10 = xt::col(result, idx(p, 1, 0)); p10 = xt::col(result, idx(p, 0, 0)) * ((1.0 + x1) * p + (2.0 + x1 * 3.0 + x2) * 0.5); if (ky > 0) { p10 += 2 * ky * xt::view(P, idx(kx, ky - 1, kz), xt::all(), idx(p, 0, 0)) * (1.5 + p); } if (kz > 0) { p10 += kz * xt::view(P, idx(kx, ky, kz - 1), xt::all(), idx(p, 0, 0)); } for (int q = 1; q < n - p; ++q) { auto [aq, bq, cq] = jrc(2 * p + 1, q); auto pq1 = xt::col(result, idx(p, q + 1, 0)); pq1 = xt::col(result, idx(p, q, 0)) * (f3 * aq + f4 * bq) - xt::col(result, idx(p, q - 1, 0)) * f5 * cq; if (ky > 0) { pq1 += 2 * ky * xt::view(P, idx(kx, ky - 1, kz), xt::all(), idx(p, q, 0)) * aq; } if (kz > 0) { pq1 += kz * xt::view(P, idx(kx, ky, kz - 1), xt::all(), idx(p, q, 0)) * (aq - bq) + kz * (1.0 - x2) * xt::view(P, idx(kx, ky, kz - 1), xt::all(), idx(p, q - 1, 0)) * cq; } if (kz > 1) { // Quadratic term in z pq1 -= kz * (kz - 1) * xt::view(P, idx(kx, ky, kz - 2), xt::all(), idx(p, q - 1, 0)) * cq; } } } for (int p = 0; p < n; ++p) { for (int q = 0; q < n - p; ++q) { auto pq = xt::col(result, idx(p, q, 1)); pq = xt::col(result, idx(p, q, 0)) * ((1.0 + p + q) + x2 * (2.0 + p + q)); if (kz > 0) { pq += 2 * kz * (2.0 + p + q) * xt::view(P, idx(kx, ky, kz - 1), xt::all(), idx(p, q, 0)); } } } for (int p = 0; p < n - 1; ++p) { for (int q = 0; q < n - p - 1; ++q) { for (int r = 1; r < n - p - q; ++r) { auto [ar, br, cr] = jrc(2 * p + 2 * q + 2, r); xt::col(result, idx(p, q, r + 1)) = xt::col(result, idx(p, q, r)) * (x2 * ar + br) - xt::col(result, idx(p, q, r - 1)) * cr; if (kz > 0) { xt::col(result, idx(p, q, r + 1)) += 2 * kz * ar * xt::view(P, idx(kx, ky, kz - 1), xt::all(), idx(p, q, r)); } } } } // Store this derivative xt::view(P, idx(kx, ky, kz), xt::all(), xt::all()) = result; } } } // Normalise for (std::size_t i = 0; i < P.shape(0); ++i) { auto Pi = xt::view(P, i, xt::all(), xt::all()); for (int p = 0; p < n + 1; ++p) { for (int q = 0; q < n + 1 - p; ++q) { for (int r = 0; r < n + 1 - p - q; ++r) { xt::col(Pi, idx(p, q, r)) *= std::sqrt((p + 0.5) * (p + q + 1.0) * (p + q + r + 1.5)); } } } } return P; } //----------------------------------------------------------------------------- xt::xtensor tabulate_polyset_pyramid_derivs(int n, std::size_t nderiv, const xt::xtensor& pts) { assert(pts.shape(1) == 3); const std::size_t m = (n + 1) * (n + 2) * (2 * n + 3) / 6; const std::size_t md = (nderiv + 1) * (nderiv + 2) * (nderiv + 3) / 6; // Indexing for pyramidal basis functions auto pyr_idx = [n](int p, int q, int r) -> std::size_t { const int 
rv = n - r + 1; const int r0 = r * (n + 1) * (n - r + 2) + (2 * r - 1) * (r - 1) * r / 6; return r0 + p * rv + q; }; auto x = pts * 2.0 - 1.0; const auto x0 = xt::col(x, 0); const auto x1 = xt::col(x, 1); const auto x2 = xt::col(x, 2); auto f2 = 0.25 * xt::square(1.0 - x2); // Traverse derivatives in increasing order xt::xtensor P({md, pts.shape(0), m}); xt::xtensor result({pts.shape(0), m}); for (std::size_t k = 0; k < nderiv + 1; ++k) { for (std::size_t j = 0; j < k + 1; ++j) { for (std::size_t kx = 0; kx < j + 1; ++kx) { result = xt::zeros(result.shape()); const std::size_t ky = j - kx; const std::size_t kz = k - j; const std::size_t pyramidal_index = pyr_idx(0, 0, 0); assert(pyramidal_index < m); if (kx == 0 and ky == 0 and kz == 0) xt::col(result, pyramidal_index) = 1.0; else xt::col(result, pyramidal_index) = 0.0; // r = 0 for (int p = 0; p < n + 1; ++p) { if (p > 0) { const double a = static_cast(p - 1) / static_cast(p); auto p00 = xt::col(result, pyr_idx(p, 0, 0)); p00 = (0.5 + x0 + x2 * 0.5) * xt::col(result, pyr_idx(p - 1, 0, 0)) * (a + 1.0); if (kx > 0) { p00 += 2.0 * kx * xt::view(P, idx(kx - 1, ky, kz), xt::all(), pyr_idx(p - 1, 0, 0)) * (a + 1.0); } if (kz > 0) { p00 += kz * xt::view(P, idx(kx, ky, kz - 1), xt::all(), pyr_idx(p - 1, 0, 0)) * (a + 1.0); } if (p > 1) { p00 -= f2 * xt::col(result, pyr_idx(p - 2, 0, 0)) * a; if (kz > 0) { p00 += kz * (1.0 - x2) * xt::view(P, idx(kx, ky, kz - 1), xt::all(), pyr_idx(p - 2, 0, 0)) * a; } if (kz > 1) { // quadratic term in z p00 -= kz * (kz - 1) * xt::view(P, idx(kx, ky, kz - 2), xt::all(), pyr_idx(p - 2, 0, 0)) * a; } } } for (int q = 1; q < n + 1; ++q) { const double a = static_cast(q - 1) / static_cast(q); auto r_pq = xt::col(result, pyr_idx(p, q, 0)); r_pq = (0.5 + x1 + x2 * 0.5) * xt::col(result, pyr_idx(p, q - 1, 0)) * (a + 1.0); if (ky > 0) { r_pq += 2.0 * ky * xt::view(P, idx(kx, ky - 1, kz), xt::all(), pyr_idx(p, q - 1, 0)) * (a + 1.0); } if (kz > 0) { r_pq += kz * xt::view(P, idx(kx, ky, kz - 1), xt::all(), pyr_idx(p, q - 1, 0)) * (a + 1.0); } if (q > 1) { r_pq -= f2 * xt::col(result, pyr_idx(p, q - 2, 0)) * a; if (kz > 0) { r_pq += kz * (1.0 - x2) * xt::view(P, idx(kx, ky, kz - 1), xt::all(), pyr_idx(p, q - 2, 0)) * a; } if (kz > 1) { r_pq -= kz * (kz - 1) * xt::view(P, idx(kx, ky, kz - 2), xt::all(), pyr_idx(p, q - 2, 0)) * a; } } } } // Extend into r > 0 for (int p = 0; p < n; ++p) { for (int q = 0; q < n; ++q) { auto r_pq1 = xt::col(result, pyr_idx(p, q, 1)); r_pq1 = xt::col(result, pyr_idx(p, q, 0)) * ((1.0 + p + q) + x2 * (2.0 + p + q)); if (kz > 0) { r_pq1 += 2 * kz * xt::view(P, idx(kx, ky, kz - 1), xt::all(), pyr_idx(p, q, 0)) * (2.0 + p + q); } } } for (int r = 1; r < n + 1; ++r) { for (int p = 0; p < n - r; ++p) { for (int q = 0; q < n - r; ++q) { auto [ar, br, cr] = jrc(2 * p + 2 * q + 2, r); auto r_pqr = xt::col(result, pyr_idx(p, q, r + 1)); r_pqr = xt::col(result, pyr_idx(p, q, r)) * (x2 * ar + br) - xt::col(result, pyr_idx(p, q, r - 1)) * cr; if (kz > 0) { r_pqr += ar * 2 * kz * xt::view(P, idx(kx, ky, kz - 1), xt::all(), pyr_idx(p, q, r)); } } } } xt::view(P, idx(kx, ky, kz), xt::all(), xt::all()) = result; } } } for (std::size_t i = 0; i < P.shape(0); ++i) { auto Pi = xt::view(P, i, xt::all(), xt::all()); for (int r = 0; r < n + 1; ++r) { for (int p = 0; p < n - r + 1; ++p) { for (int q = 0; q < n - r + 1; ++q) { xt::col(Pi, pyr_idx(p, q, r)) *= std::sqrt((q + 0.5) * (p + 0.5) * (p + q + r + 1.5)); } } } } return P; } //----------------------------------------------------------------------------- 
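// [Editorial sketch, not part of the basix API; the helper name below is
// invented for illustration.] The tabulation routines in this file size the
// leading axis of P (the local variable md) by the number of distinct partial
// derivatives of order <= nderiv: nderiv + 1 on an interval,
// (nderiv + 1)(nderiv + 2)/2 on 2D cells and
// (nderiv + 1)(nderiv + 2)(nderiv + 3)/6 on 3D cells. The function below
// gathers those expressions in one place and checks two values at compile
// time.
[[maybe_unused]] constexpr std::size_t
example_derivative_count(std::size_t tdim, std::size_t nderiv)
{
  switch (tdim)
  {
  case 1:
    return nderiv + 1;
  case 2:
    return (nderiv + 1) * (nderiv + 2) / 2;
  case 3:
    return (nderiv + 1) * (nderiv + 2) * (nderiv + 3) / 6;
  default:
    return 0;
  }
}
static_assert(example_derivative_count(2, 2) == 6
                  && example_derivative_count(3, 1) == 4,
              "Counts disagree with the md expressions used in this file");
//-----------------------------------------------------------------------------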
xt::xtensor tabulate_polyset_quad_derivs(int n, int nderiv, const xt::xtensor& x) { assert(x.shape(1) == 2); const std::size_t m = (n + 1) * (n + 1); const std::size_t md = (nderiv + 1) * (nderiv + 2) / 2; // Compute 1D basis const xt::xtensor x0 = xt::col(x, 0); const xt::xtensor x1 = xt::col(x, 1); xt::xtensor px = tabulate_polyset_line_derivs(n, nderiv, x0); xt::xtensor py = tabulate_polyset_line_derivs(n, nderiv, x1); xt::xtensor P({md, x.shape(0), m}); for (int kx = 0; kx < nderiv + 1; ++kx) { auto p0 = xt::view(px, kx, xt::all(), xt::all()); for (int ky = 0; ky < nderiv + 1 - kx; ++ky) { auto result = xt::view(P, idx(kx, ky), xt::all(), xt::all()); auto p1 = xt::view(py, ky, xt::all(), xt::all()); int c = 0; for (std::size_t i = 0; i < p0.shape(1); ++i) for (std::size_t j = 0; j < p0.shape(1); ++j) xt::col(result, c++) = xt::col(p0, i) * xt::col(p1, j); } } return P; } //----------------------------------------------------------------------------- xt::xtensor tabulate_polyset_hex_derivs(std::size_t n, std::size_t nderiv, const xt::xtensor& x) { assert(x.shape(1) == 3); const std::size_t m = (n + 1) * (n + 1) * (n + 1); const std::size_t md = (nderiv + 1) * (nderiv + 2) * (nderiv + 3) / 6; // Compute 1D basis const xt::xtensor x0 = xt::col(x, 0); const xt::xtensor x1 = xt::col(x, 1); const xt::xtensor x2 = xt::col(x, 2); xt::xtensor px = tabulate_polyset_line_derivs(n, nderiv, x0); xt::xtensor py = tabulate_polyset_line_derivs(n, nderiv, x1); xt::xtensor pz = tabulate_polyset_line_derivs(n, nderiv, x2); // Compute basis xt::xtensor P({md, x.shape(0), m}); for (std::size_t kx = 0; kx < nderiv + 1; ++kx) { auto p0 = xt::view(px, kx, xt::all(), xt::all()); for (std::size_t ky = 0; ky < nderiv + 1 - kx; ++ky) { auto p1 = xt::view(py, ky, xt::all(), xt::all()); for (std::size_t kz = 0; kz < nderiv + 1 - kx - ky; ++kz) { auto result = xt::view(P, idx(kx, ky, kz), xt::all(), xt::all()); auto p2 = xt::view(pz, kz, xt::all(), xt::all()); int c = 0; for (std::size_t i = 0; i < p0.shape(1); ++i) { auto pi = xt::col(p0, i); for (std::size_t j = 0; j < p1.shape(1); ++j) { auto pj = xt::col(p1, j); for (std::size_t k = 0; k < p2.shape(1); ++k) xt::col(result, c++) = pi * pj * xt::col(p2, k); } } } } } return P; } //----------------------------------------------------------------------------- xt::xtensor tabulate_polyset_prism_derivs(std::size_t n, std::size_t nderiv, const xt::xtensor& x) { assert(x.shape(1) == 3); const std::size_t m = (n + 1) * (n + 1) * (n + 2) / 2; const std::size_t md = (nderiv + 1) * (nderiv + 2) * (nderiv + 3) / 6; const xt::xtensor x01 = xt::view(x, xt::all(), xt::range(0, 2)); const xt::xtensor x2 = xt::col(x, 2); xt::xtensor pxy = tabulate_polyset_triangle_derivs(n, nderiv, x01); xt::xtensor pz = tabulate_polyset_line_derivs(n, nderiv, x2); xt::xtensor P({md, x.shape(0), m}); for (std::size_t kx = 0; kx < nderiv + 1; ++kx) { for (std::size_t ky = 0; ky < nderiv + 1 - kx; ++ky) { auto p0 = xt::view(pxy, idx(kx, ky), xt::all(), xt::all()); for (std::size_t kz = 0; kz < nderiv + 1 - kx - ky; ++kz) { auto p1 = xt::view(pz, kz, xt::all(), xt::all()); auto result = xt::view(P, idx(kx, ky, kz), xt::all(), xt::all()); int c = 0; for (std::size_t i = 0; i < p0.shape(1); ++i) for (std::size_t k = 0; k < p1.shape(1); ++k) xt::col(result, c++) = xt::col(p0, i) * xt::col(p1, k); } } } return P; } } // namespace //----------------------------------------------------------------------------- xt::xtensor polyset::tabulate(cell::type celltype, int d, int n, const xt::xarray& x) { switch 
(celltype) { case cell::type::interval: assert(x.dimension() == 1); return tabulate_polyset_line_derivs(d, n, x); case cell::type::triangle: return tabulate_polyset_triangle_derivs(d, n, x); case cell::type::tetrahedron: return tabulate_polyset_tetrahedron_derivs(d, n, x); case cell::type::quadrilateral: return tabulate_polyset_quad_derivs(d, n, x); case cell::type::prism: return tabulate_polyset_prism_derivs(d, n, x); case cell::type::pyramid: return tabulate_polyset_pyramid_derivs(d, n, x); case cell::type::hexahedron: return tabulate_polyset_hex_derivs(d, n, x); default: throw std::runtime_error("Polynomial set: unsupported cell type"); } } //----------------------------------------------------------------------------- int polyset::dim(cell::type celltype, int d) { switch (celltype) { case cell::type::triangle: return (d + 1) * (d + 2) / 2; case cell::type::tetrahedron: return (d + 1) * (d + 2) * (d + 3) / 6; case cell::type::prism: return (d + 1) * (d + 1) * (d + 2) / 2; case cell::type::pyramid: return (d + 1) * (d + 2) * (2 * d + 3) / 6; case cell::type::interval: return (d + 1); case cell::type::quadrilateral: return (d + 1) * (d + 1); case cell::type::hexahedron: return (d + 1) * (d + 1) * (d + 1); default: return 1; } } //----------------------------------------------------------------------------- basix-0.3.0/cpp/basix/polyset.h000066400000000000000000000053541411115224000163560ustar00rootroot00000000000000// Copyright (c) 2020 Chris Richardson // FEniCS Project // SPDX-License-Identifier: MIT #pragma once #include "cell.h" #include #include /// ## Orthonormal polynomial basis on reference cell /// These are the underlying "expansion sets" for all finite elements, /// which when multiplied by a set of "coefficients" give the FE basis /// functions. /// /// The polynomials (and their derivatives) can be tabulated on unit /// interval, triangle, tetrahedron, quadrilateral, hexahedron, prism /// and pyramids. namespace basix::polyset { /// Tabulate the orthonormal polynomial basis, and derivatives, at /// points on the reference cell. /// /// All derivatives up to the given order are computed. If derivatives /// are not required, use `n = 0`. For example, order `n = 2` for a 2D /// cell, will compute the basis \f$\psi, d\psi/dx, d\psi/dy, d^2 /// \psi/dx^2, d^2\psi/dxdy, d^2\psi/dy^2\f$ in that order (0, 0), (1, /// 0), (0, 1), (2, 0), (1, 1), (0 ,2). /// /// For an interval cell there are `nderiv + 1` derivatives, for a 2D /// cell, there are `(nderiv + 1)(nderiv + 2)/2` derivatives, and in 3D, /// there are `(nderiv + 1)(nderiv + 2)(nderiv + 3)/6`. The ordering is /// 'triangular' with the lower derivatives appearing first. /// /// @param[in] celltype Cell type /// @param[in] d Polynomial degree /// @param[in] n Maximum derivative order. Use n = 0 for the basis only. /// @param[in] x Points at which to evaluate the basis. The shape is /// (number of points, geometric dimension). /// @return Polynomial sets, for each derivative, tabulated at points. /// The shape is `(number of derivatives computed, number of points, /// basis index)`. /// /// - The first index is the derivative. The first entry is the basis /// itself. Derivatives are stored in triangular (2D) or tetrahedral /// (3D) ordering, e.g. if `(p, q)` denotes `p` order dervative with /// repsect to `x` and `q` order derivative with respect to `y`, [0] -> /// (0, 0), [1] -> (1, 0), [2] -> (0, 1), [3] -> (2, 0), [4] -> (1, 1), /// [5] -> (0, 2), [6] -> (3, 0),... 
/// The function basix::idx maps tuples `(p, q, r)` to the array index. /// /// - The second index is the point, with index `i` correspondign to the /// point in row `i` of @p x. /// /// - The third index is the basis function index. /// @todo Does the order for the third index need to be documented? xt::xtensor tabulate(cell::type celltype, int d, int n, const xt::xarray& x); /// Dimension of a polynomial space /// @param[in] cell The cell type /// @param[in] d The polynomial degree /// @return The number terms in the basis spanning a space of /// polynomial degree @p d int dim(cell::type cell, int d); } // namespace basix::polyset basix-0.3.0/cpp/basix/precompute.cpp000066400000000000000000000054761411115224000174020ustar00rootroot00000000000000// Copyright (c) 2020 Matthew Scroggs // FEniCS Project // SPDX-License-Identifier: MIT #include "precompute.h" #include using namespace basix; //----------------------------------------------------------------------------- std::vector precompute::prepare_permutation(const std::vector& perm) { std::vector f_perm(perm.size()); for (std::size_t row = 0; row < perm.size(); ++row) { std::size_t row2 = perm[row]; while (row2 < row) row2 = perm[row2]; f_perm[row] = row2; } return f_perm; } //----------------------------------------------------------------------------- std::tuple, std::vector, xt::xtensor> precompute::prepare_matrix(const xt::xtensor& matrix) { using T = double; const std::size_t dim = matrix.shape(0); std::vector perm(dim); xt::xtensor permuted_matrix({dim, dim}); std::vector diag(dim); // Permute the matrix so that all the top left blocks are invertible for (std::size_t i = 0; i < dim; ++i) { double max_det = 0; std::size_t col = 0; for (std::size_t j = 0; j < dim; ++j) { const bool used = std::find(perm.begin(), std::next(perm.begin(), i), j) != std::next(perm.begin(), i); if (!used) { xt::col(permuted_matrix, i) = xt::col(matrix, j); double det = std::abs(xt::linalg::det(xt::view( permuted_matrix, xt::range(0, i + 1), xt::range(0, i + 1)))); if (det > max_det) { max_det = det; col = j; } } } xt::col(permuted_matrix, i) = xt::col(matrix, col); perm[i] = col; } // Create the precomputed representation of the matrix xt::xtensor prepared_matrix({dim, dim}); for (std::size_t i = 0; i < dim; ++i) { diag[i] = permuted_matrix(i, i); prepared_matrix(i, i) = 0; if (i < dim - 1) { xt::view(prepared_matrix, i, xt::range(i + 1, dim)) = xt::view(permuted_matrix, i, xt::range(i + 1, dim)); } if (i > 0) { xt::xtensor v = xt::linalg::solve( xt::transpose( xt::view(permuted_matrix, xt::range(0, i), xt::range(0, i))), xt::view(permuted_matrix, i, xt::range(0, i))); xt::view(prepared_matrix, i, xt::range(0, i)) = v; diag[i] -= xt::linalg::dot( v, xt::view(permuted_matrix, xt::range(0, i), i))(0); for (std::size_t j = i + 1; j < dim; ++j) { prepared_matrix(i, j) -= xt::linalg::dot( v, xt::view(permuted_matrix, xt::range(0, i), j))(0); } } } return {prepare_permutation(perm), std::move(diag), std::move(prepared_matrix)}; } //----------------------------------------------------------------------------- basix-0.3.0/cpp/basix/precompute.h000066400000000000000000000323131411115224000170350ustar00rootroot00000000000000// Copyright (c) 2020 Matthew Scroggs // FEniCS Project // SPDX-License-Identifier: MIT #pragma once #include #include #include #include /// ## Matrix and permutation precomputation /// These functions generate precomputed version of matrices to allow /// application without temporary memory assignment later namespace basix::precompute { /// 
Prepare a permutation /// /// This computes a representation of the permutation that allows the /// permutation to be applied without any temporary memory assignment. /// /// In pseudo code, this function does the following: /// /// \code{.pseudo} /// FOR index, entry IN perm: /// new_index = entry /// WHILE new_index < index: /// new_index = perm[new_index] /// OUT[index] = new_index /// \endcode /// /// Example /// ------- /// As an example, consider the permutation `P = [1, 4, 0, 5, 2, 3]`. /// /// First, we look at the 0th entry. `P[0]` is 1. This is greater than 0, so the /// 0th entry of the output is 1. /// /// Next, we look at the 1st entry. `P[1]` is 4. This is greater than 1, so the /// 1st entry of the output is 4. /// /// Next, we look at the 2nd entry. `P[2]` is 0. This is less than 2, so we look /// at `P[0]. `P[0]` is 1. This is less than 2, so we look at `P[1]`. `P[1]` /// is 4. This is greater than 2, so the 2nd entry of the output is 4. /// /// Next, we look at the 3rd entry. `P[3]` is 5. This is greater than 3, so the /// 3rd entry of the output is 5. /// /// Next, we look at the 4th entry. `P[4]` is 2. This is less than 4, so we look /// at `P[2]`. `P[2]` is 0. This is less than 4, so we look at `P[0]`. `P[0]` /// is 1. This is less than 4, so we look at `P[1]`. `P[1]` is 4. This is /// greater than (or equal to) 4, so the 4th entry of the output is 4. /// /// Next, we look at the 5th entry. `P[5]` is 3. This is less than 5, so we look /// at `P[3]`. `P[3]` is 5. This is greater than (or equal to) 5, so the 5th /// entry of the output is 5. /// /// Hence, the output of this function in this case is `[1, 4, 4, 5, 4, 5]`. /// /// For an example of how the permutation in this form is applied, see /// `apply_permutation()`. /// /// @param[in] perm A permutation /// @return The precomputed representation of the permutation std::vector prepare_permutation(const std::vector& perm); /// Apply a (precomputed) permutation /// /// This uses the representation returned by `prepare_permutation()` to apply a /// permutation without needing any temporary memory. /// /// In pseudo code, this function does the following: /// /// \code{.pseudo} /// FOR index, entry IN perm: /// SWAP(data[index], data[entry]) /// \endcode /// /// If `block_size` is set, this will apply the permutation to every block. /// The `offset` is set, this will start applying the permutation at the /// `offset`th block. /// /// Example /// ------- /// As an example, consider the permutation `P = [1, 4, 0, 5, 2, 3]`. /// In the documentation of `prepare_permutation()`, we saw that the precomputed /// representation of this permutation is `P2 = [1, 4, 4, 5, 4, 5]`. In this /// example, we look at how this representation can be used to apply this /// permutation to the array `A = [a, b, c, d, e, f]`. /// /// `P2[0]` is 1, so we swap `A[0]` and `A[1]`. After this, `A = [b, a, c, d, e, /// f]`. /// /// `P2[1]` is 4, so we swap `A[1]` and `A[4]`. After this, `A = [b, e, c, d, a, /// f]`. /// /// `P2[2]` is 4, so we swap `A[2]` and `A[4]`. After this, `A = [b, e, a, d, c, /// f]`. /// /// `P2[3]` is 5, so we swap `A[3]` and `A[5]`. After this, `A = [b, e, a, f, c, /// d]`. /// /// `P2[4]` is 4, so we swap `A[4]` and `A[4]`. This changes nothing. /// /// `P2[5]` is 5, so we swap `A[5]` and `A[5]`. This changes nothing. /// /// Therefore the result of applying this permutation is `[b, e, a, f, c, d]` /// (which is what we get if we apply the permutation directly). 
/// /// @param[in] perm A permutation in precomputed form (as returned by /// `prepare_permutation()`) /// @param[in,out] data The data to apply the permutation to /// @param[in] offset The position in the data to start applying the permutation /// @param[in] block_size The block size of the data template void apply_permutation(const std::vector& perm, const xtl::span& data, std::size_t offset = 0, std::size_t block_size = 1) { for (std::size_t b = 0; b < block_size; ++b) { for (std::size_t i = 0; i < perm.size(); ++i) { std::swap(data[block_size * (offset + i) + b], data[block_size * (offset + perm[i]) + b]); } } } /// Apply a (precomputed) permutation to some transposed data /// /// see `apply_permutation()`. template void apply_permutation_to_transpose(const std::vector& perm, const xtl::span& data, std::size_t offset = 0, std::size_t block_size = 1) { const std::size_t dim = perm.size(); const std::size_t data_size = data.size() / block_size; for (std::size_t b = 0; b < block_size; ++b) { for (std::size_t i = 0; i < dim; ++i) { std::swap(data[data_size * b + offset + i], data[data_size * b + offset + perm[i]]); } } } /// Prepare a matrix /// /// This computes a representation of the matrix that allows the matrix to be /// applied without any temporary memory assignment. /// /// This function will first permute the matrix's columns so that the top left /// @f$n\times n@f$ blocks are invertible (for all @f$n@f$). Let @f$A@f$ be the /// input matrix after the permutation is applied. The output vector @f$D@f$ and /// matrix @f$M@f$ are then given by: /// @f{align*}{ /// D_i &= \begin{cases} /// A_{i, i} & i = 0\\ /// A_{i, i} - A_{i,:i}A_{:i,:i}^{-1}A_{:i,i} & i \not= 0 /// \end{cases},\\ /// M_{i,j} &= \begin{cases} /// A_{i,:i}A_{:i,:i}^{-1}e_j & j < i\\ /// 0 & j = i\\ /// A_{i, i} - A_{i,:i}A_{:i,:i}^{-1}A_{:i,j} & j > i = 0 /// \end{cases}, /// @f} /// where @f$e_j@f$ is the @f$j@f$th coordinate vector, we index all the /// matrices and vector starting at 0, and we use numpy-slicing-stying notation /// in the subscripts: for example, @f$A_{:i,j}@f$ represents the first @f$i@f$ /// entries in the @f$j@f$th column of @f$A@f$ /// /// This function returns the permutation (precomputed as in /// `prepare_permutation()`), the vector @f$D@f$, and the matrix @f$M@f$ as a /// tuple. /// /// Example /// ------- /// As an example, consider the matrix @f$A = @f$ `[[-1, 0, 1], [1, 1, 0], [2, /// 0, 2]]`. For this matrix, no permutation is needed, so the first item in the /// output will represent the identity permutation. We now compute the output /// vector @f$D@f$ and matrix @f$M@f$. /// /// First, we set @f$D_0 = A_{0,0}=-1@f$, /// set the diagonal of @f$M@f$ to be 0 /// and set @f$M_{0, 1:} = A_{0, 1:}=\begin{bmatrix}0&1\end{bmatrix}@f$. /// The output so far is /// @f{align*}{ D &= \begin{bmatrix}-1\\?\\?\end{bmatrix},\\ /// \quad M &= \begin{bmatrix} /// 0&0&1\\ /// ?&0&?\\ /// ?&?&0 /// \end{bmatrix}. @f} /// /// Next, we set: /// @f{align*}{ D_1 &= A_{1,1} - A_{1, :1}A_{:1,:1}^{-1}A_{:1, 1}\\ /// &= 1 - /// \begin{bmatrix}-1\end{bmatrix}\cdot\begin{bmatrix}0\end{bmatrix}\\ /// &= 1,\\ /// M_{2,0} &= A_{1, :1}A_{:1,:1}^{-1}e_0\\ /// &= \begin{bmatrix}1\end{bmatrix}\begin{bmatrix}-1\end{bmatrix}^{-1} /// \begin{bmatrix}1\end{bmatrix}\\ /// &= \begin{bmatrix}-1\end{bmatrix} /// M_{2,3} &= A_{1,2}-A_{1, :1}A_{:1,:1}^{-1}A_{:1, 1}\\ /// &= /// 0-\begin{bmatrix}1\end{bmatrix}\begin{bmatrix}-1\end{bmatrix}^{-1} /// \begin{bmatrix}1\end{bmatrix},\\ /// &= 1. 
/// @f} /// The output so far is /// @f{align*}{ D &= \begin{bmatrix}-1\\1\\?\end{bmatrix},\\ /// \quad M &= \begin{bmatrix} /// 0&0&1\\ /// -1&0&1\\ /// ?&?&0 /// \end{bmatrix}. @f} /// /// Next, we set: /// @f{align*}{ D_2 &= A_{2,2} - A_{2, :2}A_{:2,:2}^{-1}A_{:2, 2}\\ /// &= 2 - /// \begin{bmatrix}2&0\end{bmatrix} /// \begin{bmatrix}-1&0\\1&1\end{bmatrix}^{-1} /// \begin{bmatrix}1\\0\end{bmatrix}\\ /// &= 4,\\ /// M_{2,0} &= A_{2, :2}A_{:2,:2}^{-1}e_0\\ &= -2.\\ /// M_{2,1} &= A_{2, :2}A_{:2,:2}^{-1}e_1\\ &= 0.\\ /// @f} /// The output is /// @f{align*}{ D &= \begin{bmatrix}-1\\1\\4\end{bmatrix},\\ /// \quad M &= \begin{bmatrix} /// 0&0&1\\ /// -1&0&1\\ /// -2&0&0 /// \end{bmatrix}. @f} /// /// For an example of how the permutation in this form is applied, see /// `apply_matrix()`. /// /// @param[in] matrix A matrix /// @return The precomputed representation of the matrix std::tuple, std::vector, xt::xtensor> prepare_matrix(const xt::xtensor& matrix); /// Apply a (precomputed) matrix /// /// This uses the representation returned by `prepare_matrix()` to apply a /// matrix without needing any temporary memory. /// /// In pseudo code, this function does the following: /// /// \code{.pseudo} /// perm, diag, mat = matrix /// apply_permutation(perm, data) /// FOR index IN RANGE(dim): /// data[index] *= diag[index] /// FOR j IN RANGE(dim): /// data[index] *= mat[index, j] * data[j] /// \endcode /// /// If `block_size` is set, this will apply the permutation to every block. /// The `offset` is set, this will start applying the permutation at the /// `offset`th block. /// /// Example /// ------- /// As an example, consider the matrix @f$A = @f$ `[[-1, 0, 1], [1, 1, 0], [2, /// 0, 2]]`. In the documentation of `prepare_matrix()`, we saw that the /// precomputed representation of this matrix is the identity permutation, /// @f{align*}{ D &= \begin{bmatrix}-1\\1\\4\end{bmatrix},\\ /// \quad M &= \begin{bmatrix} /// 0&0&1\\ /// -1&0&1\\ /// -2&0&0 /// \end{bmatrix}. @f} /// In this example, we look at how this representation can be used to /// apply this matrix to the vector @f$v = @f$ `[3, -1, 2]`. /// /// No permutation is necessary, so first, we multiply @f$v_0@f$ by /// @f$D_0=-1@f$. After this, @f$v@f$ is `[-3, -1, 2]`. /// /// Next, we add @f$M_{0,i}v_i@f$ to @f$v_0@f$ for all @f$i@f$: in this case, we /// add @f$0\times-3 + 0\times-1 + 1\times2 = 2@f$. After this, @f$v@f$ is `[-1, /// -1, 2]`. /// /// Next, we multiply @f$v_1@f$ by @f$D_1=1@f$. After this, @f$v@f$ is `[-1, -1, /// 2]`. /// /// Next, we add @f$M_{1,i}v_i@f$ to @f$v_1@f$ for all @f$i@f$: in this case, we /// add @f$-1\times-1 + 0\times-1 + 1\times2 = 3@f$. After this, @f$v@f$ is /// `[-1, 2, 2]`. /// /// Next, we multiply @f$v_2@f$ by @f$D_2=4@f$. After this, @f$v@f$ is `[-1, 2, /// 8]`. /// /// Next, we add @f$M_{2,i}v_i@f$ to @f$v_2@f$ for all @f$i@f$: in this case, we /// add @f$-2\times-1 + 0\times2 + 0\times8 = 2@f$. After this, @f$v@f$ is `[-1, /// 2, 10]`. 
This final value of @f$v@f$ is what the result of @f$Av@f$ /// /// @param[in] matrix A matrix in precomputed form (as returned by /// `prepare_matrix()`) /// @param[in,out] data The data to apply the permutation to /// @param[in] offset The position in the data to start applying the permutation /// @param[in] block_size The block size of the data template void apply_matrix(const std::tuple, std::vector, xt::xtensor>& matrix, const xtl::span& data, std::size_t offset = 0, std::size_t block_size = 1) { const std::vector& v_size_t = std::get<0>(matrix); const std::vector& v_t = std::get<1>(matrix); const xt::xtensor& M = std::get<2>(matrix); const std::size_t dim = v_size_t.size(); apply_permutation(v_size_t, data, offset, block_size); for (std::size_t b = 0; b < block_size; ++b) { for (std::size_t i = 0; i < dim; ++i) { data[block_size * (offset + i) + b] *= v_t[i]; for (std::size_t j = 0; j < dim; ++j) { data[block_size * (offset + i) + b] += M(i, j) * data[block_size * (offset + j) + b]; } } } } /// Apply a (precomputed) matrix to some transposed data. /// /// See `apply_matrix()`. template void apply_matrix_to_transpose( const std::tuple, std::vector, xt::xtensor>& matrix, const xtl::span& data, std::size_t offset = 0, std::size_t block_size = 1) { const std::vector& v_size_t = std::get<0>(matrix); const std::vector& v_t = std::get<1>(matrix); const xt::xtensor& M = std::get<2>(matrix); const std::size_t dim = v_size_t.size(); const std::size_t data_size = data.size() / block_size; apply_permutation_to_transpose(v_size_t, data, offset, block_size); for (std::size_t b = 0; b < block_size; ++b) { for (std::size_t i = 0; i < dim; ++i) { data[data_size * b + offset + i] *= v_t[i]; for (std::size_t j = 0; j < dim; ++j) { data[data_size * b + offset + i] += M(i, j) * data[data_size * b + offset + j]; } } } } } // namespace basix::precompute basix-0.3.0/cpp/basix/quadrature.cpp000066400000000000000000002510221411115224000173620ustar00rootroot00000000000000// Copyright (c) 2020 Chris Richardson // FEniCS Project // SPDX-License-Identifier: MIT #include "quadrature.h" #include #include #include #include #include #include #include using namespace xt::placeholders; // required for `_` to work using namespace basix; namespace { //---------------------------------------------------------------------------- std::array, 2> rec_jacobi(int N, double a, double b) { // Generate the recursion coefficients alpha_k, beta_k // P_{k+1}(x) = (x-alpha_k)*P_{k}(x) - beta_k P_{k-1}(x) // for the Jacobi polynomials which are orthogonal on [-1,1] // with respect to the weight w(x)=[(1-x)^a]*[(1+x)^b] // Inputs: // N - polynomial order // a - weight parameter // b - weight parameter // Outputs: // alpha - recursion coefficients // beta - recursion coefficients // Adapted from the MATLAB code by Dirk Laurie and Walter Gautschi // http://www.cs.purdue.edu/archives/2002/wxg/codes/r_jacobi.m double nu = (b - a) / (a + b + 2.0); double mu = std::pow(2.0, (a + b + 1)) * std::tgamma(a + 1.0) * std::tgamma(b + 1.0) / std::tgamma(a + b + 2.0); std::vector alpha(N), beta(N); alpha[0] = nu; beta[0] = mu; auto n = xt::linspace(1.0, N - 1, N - 1); auto nab = 2.0 * n + a + b; auto _alpha = xt::adapt(alpha); auto _beta = xt::adapt(beta); xt::view(_alpha, xt::range(1, _)) = (b * b - a * a) / (nab * (nab + 2.0)); xt::view(_beta, xt::range(1, _)) = 4 * (n + a) * (n + b) * n * (n + a + b) / (nab * nab * (nab + 1.0) * (nab - 1.0)); return {std::move(alpha), std::move(beta)}; } 
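//-----------------------------------------------------------------------------
// [Editorial sketch, not used elsewhere in this file; the helper name is
// invented for illustration.] For the Legendre weight (a = b = 0) the general
// Jacobi coefficients computed by rec_jacobi() reduce to the classical values
// alpha_k = 0, beta_0 = 2 and beta_n = n^2 / (4 n^2 - 1), i.e. the monic
// recurrence P_{k+1}(x) = x P_k(x) - beta_k P_{k-1}(x). Writing those closed
// forms out directly gives a cheap cross-check: the result should match
// rec_jacobi(N, 0.0, 0.0) up to rounding error.
[[maybe_unused]] std::array<std::vector<double>, 2>
example_rec_legendre(int N)
{
  std::vector<double> alpha(N, 0.0); // symmetric weight => alpha_k = 0
  std::vector<double> beta(N, 0.0);
  if (N > 0)
    beta[0] = 2.0; // integral of the weight w(x) = 1 over [-1, 1]
  for (int n = 1; n < N; ++n)
    beta[n] = static_cast<double>(n * n) / (4.0 * n * n - 1.0);
  return {std::move(alpha), std::move(beta)};
}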
//---------------------------------------------------------------------------- std::array, 2> gauss(const std::vector& alpha, const std::vector& beta) { // Compute the Gauss nodes and weights from the recursion // coefficients associated with a set of orthogonal polynomials // // Inputs: // alpha - recursion coefficients // beta - recursion coefficients // // Outputs: // x - quadrature nodes // w - quadrature weights // // Adapted from the MATLAB code by Walter Gautschi // http://www.cs.purdue.edu/archives/2002/wxg/codes/gauss.m auto _alpha = xt::adapt(alpha); auto _beta = xt::adapt(beta); auto tmp = xt::sqrt(xt::view(_beta, xt::range(1, _))); // Note: forcing the layout type to get around an xtensor bug with Intel // Compilers // https://github.com/xtensor-stack/xtensor/issues/2351 xt::xtensor A = xt::diag(_alpha) + xt::diag(tmp, 1) + xt::diag(tmp, -1); auto [evals, evecs] = xt::linalg::eigh(A); std::vector x(evals.shape(0)), w(evals.shape(0)); xt::adapt(x) = evals; xt::adapt(w) = beta[0] * xt::square(xt::row(evecs, 0)); return {std::move(x), std::move(w)}; } //---------------------------------------------------------------------------- std::array, 2> lobatto(const std::vector& alpha, const std::vector& beta, double xl1, double xl2) { // Compute the Lobatto nodes and weights with the preassigned // nodes xl1,xl2 // // Inputs: // alpha - recursion coefficients // beta - recursion coefficients // xl1 - assigned node location // xl2 - assigned node location // Outputs: // x - quadrature nodes // w - quadrature weights // Based on the section 7 of the paper // "Some modified matrix eigenvalue problems" // by Gene Golub, SIAM Review Vol 15, No. 2, April 1973, pp.318--334 assert(alpha.size() == beta.size()); // Solve tridiagonal system using Thomas algorithm double g1(0.0), g2(0.0); const std::size_t n = alpha.size(); for (std::size_t i = 1; i < n - 1; ++i) { g1 = std::sqrt(beta[i]) / (alpha[i] - xl1 - std::sqrt(beta[i - 1]) * g1); g2 = std::sqrt(beta[i]) / (alpha[i] - xl2 - std::sqrt(beta[i - 1]) * g2); } g1 = 1.0 / (alpha[n - 1] - xl1 - std::sqrt(beta[n - 2]) * g1); g2 = 1.0 / (alpha[n - 1] - xl2 - std::sqrt(beta[n - 2]) * g2); std::vector alpha_l = alpha; alpha_l[n - 1] = (g1 * xl2 - g2 * xl1) / (g1 - g2); std::vector beta_l = beta; beta_l[n - 1] = (xl2 - xl1) / (g1 - g2); return gauss(alpha_l, beta_l); } //----------------------------------------------------------------------------- std::pair, std::vector> make_gauss_jacobi_quadrature(cell::type celltype, std::size_t m) { switch (celltype) { case cell::type::interval: return quadrature::make_quadrature_line(m); case cell::type::quadrilateral: { auto [QptsL, QwtsL] = quadrature::make_quadrature_line(m); xt::xtensor Qpts({m * m, 2}); std::vector Qwts(m * m); int c = 0; for (std::size_t j = 0; j < m; ++j) { for (std::size_t i = 0; i < m; ++i) { Qpts(c, 0) = QptsL[i]; Qpts(c, 1) = QptsL[j]; Qwts[c] = QwtsL[i] * QwtsL[j]; ++c; } } return {Qpts, Qwts}; } case cell::type::hexahedron: { auto [QptsL, QwtsL] = quadrature::make_quadrature_line(m); xt::xtensor Qpts({m * m * m, 3}); std::vector Qwts(m * m * m); int c = 0; for (std::size_t k = 0; k < m; ++k) { for (std::size_t j = 0; j < m; ++j) { for (std::size_t i = 0; i < m; ++i) { Qpts(c, 0) = QptsL[i]; Qpts(c, 1) = QptsL[j]; Qpts(c, 2) = QptsL[k]; Qwts[c] = QwtsL[i] * QwtsL[j] * QwtsL[k]; ++c; } } } return {Qpts, Qwts}; } case cell::type::prism: { auto [QptsL, QwtsL] = quadrature::make_quadrature_line(m); auto [QptsT, QwtsT] = quadrature::make_quadrature_triangle_collapsed(m); xt::xtensor Qpts({m * 
QptsT.shape(0), 3}); std::vector Qwts(m * QptsT.shape(0)); int c = 0; for (std::size_t k = 0; k < m; ++k) { for (std::size_t i = 0; i < QptsT.shape(0); ++i) { Qpts(c, 0) = QptsT(i, 0); Qpts(c, 1) = QptsT(i, 1); Qpts(c, 2) = QptsL[k]; Qwts[c] = QwtsT[i] * QwtsL[k]; ++c; } } return {Qpts, Qwts}; } case cell::type::pyramid: throw std::runtime_error("Pyramid not yet supported"); case cell::type::triangle: return quadrature::make_quadrature_triangle_collapsed(m); case cell::type::tetrahedron: return quadrature::make_quadrature_tetrahedron_collapsed(m); default: throw std::runtime_error("Unsupported celltype for make_quadrature"); } } //----------------------------------------------------------------------------- std::pair, std::vector> make_gll_quadrature(cell::type celltype, std::size_t m) { switch (celltype) { case cell::type::interval: return quadrature::make_gll_line(m); case cell::type::quadrilateral: { auto [QptsL, QwtsL] = quadrature::make_gll_line(m); xt::xtensor Qpts({m * m, 2}); std::vector Qwts(m * m); int c = 0; for (std::size_t j = 0; j < m; ++j) { for (std::size_t i = 0; i < m; ++i) { Qpts(c, 0) = QptsL[i]; Qpts(c, 1) = QptsL[j]; Qwts[c] = QwtsL[i] * QwtsL[j]; ++c; } } return {Qpts, Qwts}; } case cell::type::hexahedron: { auto [QptsL, QwtsL] = quadrature::make_gll_line(m); xt::xtensor Qpts({m * m * m, 3}); std::vector Qwts(m * m * m); int c = 0; for (std::size_t k = 0; k < m; ++k) { for (std::size_t j = 0; j < m; ++j) { for (std::size_t i = 0; i < m; ++i) { Qpts(c, 0) = QptsL[i]; Qpts(c, 1) = QptsL[j]; Qpts(c, 2) = QptsL[k]; Qwts[c] = QwtsL[i] * QwtsL[j] * QwtsL[k]; ++c; } } } return {Qpts, Qwts}; } case cell::type::prism: { throw std::runtime_error("Prism not yet supported"); } case cell::type::pyramid: throw std::runtime_error("Pyramid not yet supported"); case cell::type::triangle: throw std::runtime_error("Triangle not yet supported"); case cell::type::tetrahedron: throw std::runtime_error("Tetrahedron not yet supported"); default: throw std::runtime_error("Unsupported celltype for make_quadrature"); } } //----------------------------------------------------------------------------- std::pair, std::vector> make_default_tetrahedron_quadrature(int m) { if (m == 0 or m == 1) { // Scheme from Zienkiewicz and Taylor, 1 point, degree of precision 1 return {{{0.25, 0.25, 0.25}}, {1.0 / 6.0}}; } else if (m == 2) { // Scheme from Zienkiewicz and Taylor, 4 points, degree of precision 2 constexpr double a = 0.585410196624969, b = 0.138196601125011; xt::xtensor x = {{a, b, b}, {b, a, b}, {b, b, a}, {b, b, b}}; return {x, {1.0 / 24.0, 1.0 / 24.0, 1.0 / 24.0, 1.0 / 24.0}}; } else if (m == 3) { // Scheme from Zienkiewicz and Taylor, 5 points, degree of precision 3 // Note : this scheme has a negative weight xt::xtensor x{ {0.2500000000000000, 0.2500000000000000, 0.2500000000000000}, {0.5000000000000000, 0.1666666666666666, 0.1666666666666666}, {0.1666666666666666, 0.5000000000000000, 0.1666666666666666}, {0.1666666666666666, 0.1666666666666666, 0.5000000000000000}, {0.1666666666666666, 0.1666666666666666, 0.1666666666666666}}; return {x, {-0.8 / 6.0, 0.45 / 6.0, 0.45 / 6.0, 0.45 / 6.0, 0.45 / 6.0}}; } else if (m == 4) { // Keast rule, 14 points, degree of precision 4 // Values taken from // http://people.sc.fsu.edu/~jburkardt/datasets/quadrature_rules_tet/quadrature_rules_tet.html // (KEAST5) xt::xtensor x = {{0.0000000000000000, 0.5000000000000000, 0.5000000000000000}, {0.5000000000000000, 0.0000000000000000, 0.5000000000000000}, {0.5000000000000000, 0.5000000000000000, 0.0000000000000000}, 
{0.5000000000000000, 0.0000000000000000, 0.0000000000000000}, {0.0000000000000000, 0.5000000000000000, 0.0000000000000000}, {0.0000000000000000, 0.0000000000000000, 0.5000000000000000}, {0.6984197043243866, 0.1005267652252045, 0.1005267652252045}, {0.1005267652252045, 0.1005267652252045, 0.1005267652252045}, {0.1005267652252045, 0.1005267652252045, 0.6984197043243866}, {0.1005267652252045, 0.6984197043243866, 0.1005267652252045}, {0.0568813795204234, 0.3143728734931922, 0.3143728734931922}, {0.3143728734931922, 0.3143728734931922, 0.3143728734931922}, {0.3143728734931922, 0.3143728734931922, 0.0568813795204234}, {0.3143728734931922, 0.0568813795204234, 0.3143728734931922}}; std::vector w = {0.0190476190476190, 0.0190476190476190, 0.0190476190476190, 0.0190476190476190, 0.0190476190476190, 0.0190476190476190, 0.0885898247429807, 0.0885898247429807, 0.0885898247429807, 0.0885898247429807, 0.1328387466855907, 0.1328387466855907, 0.1328387466855907, 0.1328387466855907}; std::transform(w.cbegin(), w.cend(), w.begin(), [](auto x) { return x / 6.0; }); return {x, w}; } else if (m == 5) { // Keast rule, 15 points, degree of precision 5 // Values taken from // http://people.sc.fsu.edu/~jburkardt/datasets/quadrature_rules_tet/quadrature_rules_tet.html // (KEAST6) xt::xtensor x = {{0.2500000000000000, 0.2500000000000000, 0.2500000000000000}, {0.0000000000000000, 0.3333333333333333, 0.3333333333333333}, {0.3333333333333333, 0.3333333333333333, 0.3333333333333333}, {0.3333333333333333, 0.3333333333333333, 0.0000000000000000}, {0.3333333333333333, 0.0000000000000000, 0.3333333333333333}, {0.7272727272727273, 0.0909090909090909, 0.0909090909090909}, {0.0909090909090909, 0.0909090909090909, 0.0909090909090909}, {0.0909090909090909, 0.0909090909090909, 0.7272727272727273}, {0.0909090909090909, 0.7272727272727273, 0.0909090909090909}, {0.4334498464263357, 0.0665501535736643, 0.0665501535736643}, {0.0665501535736643, 0.4334498464263357, 0.0665501535736643}, {0.0665501535736643, 0.0665501535736643, 0.4334498464263357}, {0.0665501535736643, 0.4334498464263357, 0.4334498464263357}, {0.4334498464263357, 0.0665501535736643, 0.4334498464263357}, {0.4334498464263357, 0.4334498464263357, 0.0665501535736643}}; std::vector w = {0.1817020685825351, 0.0361607142857143, 0.0361607142857143, 0.0361607142857143, 0.0361607142857143, 0.0698714945161738, 0.0698714945161738, 0.0698714945161738, 0.0698714945161738, 0.0656948493683187, 0.0656948493683187, 0.0656948493683187, 0.0656948493683187, 0.0656948493683187, 0.0656948493683187}; std::transform(w.cbegin(), w.cend(), w.begin(), [](auto x) { return x / 6.0; }); return {x, w}; } else if (m == 6) { // Keast rule, 24 points, degree of precision 6 // Values taken from // http://people.sc.fsu.edu/~jburkardt/datasets/quadrature_rules_tet/quadrature_rules_tet.html // (KEAST7) xt::xtensor x = {{0.3561913862225449, 0.2146028712591517, 0.2146028712591517}, {0.2146028712591517, 0.2146028712591517, 0.2146028712591517}, {0.2146028712591517, 0.2146028712591517, 0.3561913862225449}, {0.2146028712591517, 0.3561913862225449, 0.2146028712591517}, {0.8779781243961660, 0.0406739585346113, 0.0406739585346113}, {0.0406739585346113, 0.0406739585346113, 0.0406739585346113}, {0.0406739585346113, 0.0406739585346113, 0.8779781243961660}, {0.0406739585346113, 0.8779781243961660, 0.0406739585346113}, {0.0329863295731731, 0.3223378901422757, 0.3223378901422757}, {0.3223378901422757, 0.3223378901422757, 0.3223378901422757}, {0.3223378901422757, 0.3223378901422757, 0.0329863295731731}, {0.3223378901422757, 
0.0329863295731731, 0.3223378901422757}, {0.2696723314583159, 0.0636610018750175, 0.0636610018750175}, {0.0636610018750175, 0.2696723314583159, 0.0636610018750175}, {0.0636610018750175, 0.0636610018750175, 0.2696723314583159}, {0.6030056647916491, 0.0636610018750175, 0.0636610018750175}, {0.0636610018750175, 0.6030056647916491, 0.0636610018750175}, {0.0636610018750175, 0.0636610018750175, 0.6030056647916491}, {0.0636610018750175, 0.2696723314583159, 0.6030056647916491}, {0.2696723314583159, 0.6030056647916491, 0.0636610018750175}, {0.6030056647916491, 0.0636610018750175, 0.2696723314583159}, {0.0636610018750175, 0.6030056647916491, 0.2696723314583159}, {0.2696723314583159, 0.0636610018750175, 0.6030056647916491}, {0.6030056647916491, 0.2696723314583159, 0.0636610018750175}}; std::vector w = {0.0399227502581679, 0.0399227502581679, 0.0399227502581679, 0.0399227502581679, 0.0100772110553207, 0.0100772110553207, 0.0100772110553207, 0.0100772110553207, 0.0553571815436544, 0.0553571815436544, 0.0553571815436544, 0.0553571815436544, 0.0482142857142857, 0.0482142857142857, 0.0482142857142857, 0.0482142857142857, 0.0482142857142857, 0.0482142857142857, 0.0482142857142857, 0.0482142857142857, 0.0482142857142857, 0.0482142857142857, 0.0482142857142857, 0.0482142857142857}; std::transform(w.cbegin(), w.cend(), w.begin(), [](auto x) { return x / 6.0; }); return {x, w}; } else if (m == 7) { // Keast rule, 31 points, degree of precision 7 // Values taken from // http://people.sc.fsu.edu/~jburkardt/datasets/quadrature_rules_tet/quadrature_rules_tet.html // (KEAST8) xt::xtensor x = {{0.2500000000000000, 0.2500000000000000, 0.2500000000000000}, {0.7653604230090441, 0.0782131923303186, 0.0782131923303186}, {0.0782131923303186, 0.0782131923303186, 0.0782131923303186}, {0.0782131923303186, 0.0782131923303186, 0.7653604230090441}, {0.0782131923303186, 0.7653604230090441, 0.0782131923303186}, {0.6344703500082868, 0.1218432166639044, 0.1218432166639044}, {0.1218432166639044, 0.1218432166639044, 0.1218432166639044}, {0.1218432166639044, 0.1218432166639044, 0.6344703500082868}, {0.1218432166639044, 0.6344703500082868, 0.1218432166639044}, {0.0023825066607383, 0.3325391644464206, 0.3325391644464206}, {0.3325391644464206, 0.3325391644464206, 0.3325391644464206}, {0.3325391644464206, 0.3325391644464206, 0.0023825066607383}, {0.3325391644464206, 0.0023825066607383, 0.3325391644464206}, {0.0000000000000000, 0.5000000000000000, 0.5000000000000000}, {0.5000000000000000, 0.0000000000000000, 0.5000000000000000}, {0.5000000000000000, 0.5000000000000000, 0.0000000000000000}, {0.5000000000000000, 0.0000000000000000, 0.0000000000000000}, {0.0000000000000000, 0.5000000000000000, 0.0000000000000000}, {0.0000000000000000, 0.0000000000000000, 0.5000000000000000}, {0.2000000000000000, 0.1000000000000000, 0.1000000000000000}, {0.1000000000000000, 0.2000000000000000, 0.1000000000000000}, {0.1000000000000000, 0.1000000000000000, 0.2000000000000000}, {0.6000000000000000, 0.1000000000000000, 0.1000000000000000}, {0.1000000000000000, 0.6000000000000000, 0.1000000000000000}, {0.1000000000000000, 0.1000000000000000, 0.6000000000000000}, {0.1000000000000000, 0.2000000000000000, 0.6000000000000000}, {0.2000000000000000, 0.6000000000000000, 0.1000000000000000}, {0.6000000000000000, 0.1000000000000000, 0.2000000000000000}, {0.1000000000000000, 0.6000000000000000, 0.2000000000000000}, {0.2000000000000000, 0.1000000000000000, 0.6000000000000000}, {0.6000000000000000, 0.2000000000000000, 0.1000000000000000}}; std::vector w = {0.1095853407966528, 
0.0635996491464850, 0.0635996491464850, 0.0635996491464850, 0.0635996491464850, -0.3751064406859797, -0.3751064406859797, -0.3751064406859797, -0.3751064406859797, 0.0293485515784412, 0.0293485515784412, 0.0293485515784412, 0.0293485515784412, 0.0058201058201058, 0.0058201058201058, 0.0058201058201058, 0.0058201058201058, 0.0058201058201058, 0.0058201058201058, 0.1653439153439105, 0.1653439153439105, 0.1653439153439105, 0.1653439153439105, 0.1653439153439105, 0.1653439153439105, 0.1653439153439105, 0.1653439153439105, 0.1653439153439105, 0.1653439153439105, 0.1653439153439105, 0.1653439153439105}; std::transform(w.cbegin(), w.cend(), w.begin(), [](auto x) { return x / 6.0; }); return {x, w}; } else if (m == 8) { // Keast rule, 45 points, degree of precision 8 // Values taken from // http://people.sc.fsu.edu/~jburkardt/datasets/quadrature_rules_tet/quadrature_rules_tet.html // (KEAST9) xt::xtensor x = {{0.2500000000000000, 0.2500000000000000, 0.2500000000000000}, {0.6175871903000830, 0.1274709365666390, 0.1274709365666390}, {0.1274709365666390, 0.1274709365666390, 0.1274709365666390}, {0.1274709365666390, 0.1274709365666390, 0.6175871903000830}, {0.1274709365666390, 0.6175871903000830, 0.1274709365666390}, {0.9037635088221031, 0.0320788303926323, 0.0320788303926323}, {0.0320788303926323, 0.0320788303926323, 0.0320788303926323}, {0.0320788303926323, 0.0320788303926323, 0.9037635088221031}, {0.0320788303926323, 0.9037635088221031, 0.0320788303926323}, {0.4502229043567190, 0.0497770956432810, 0.0497770956432810}, {0.0497770956432810, 0.4502229043567190, 0.0497770956432810}, {0.0497770956432810, 0.0497770956432810, 0.4502229043567190}, {0.0497770956432810, 0.4502229043567190, 0.4502229043567190}, {0.4502229043567190, 0.0497770956432810, 0.4502229043567190}, {0.4502229043567190, 0.4502229043567190, 0.0497770956432810}, {0.3162695526014501, 0.1837304473985499, 0.1837304473985499}, {0.1837304473985499, 0.3162695526014501, 0.1837304473985499}, {0.1837304473985499, 0.1837304473985499, 0.3162695526014501}, {0.1837304473985499, 0.3162695526014501, 0.3162695526014501}, {0.3162695526014501, 0.1837304473985499, 0.3162695526014501}, {0.3162695526014501, 0.3162695526014501, 0.1837304473985499}, {0.0229177878448171, 0.2319010893971509, 0.2319010893971509}, {0.2319010893971509, 0.0229177878448171, 0.2319010893971509}, {0.2319010893971509, 0.2319010893971509, 0.0229177878448171}, {0.5132800333608811, 0.2319010893971509, 0.2319010893971509}, {0.2319010893971509, 0.5132800333608811, 0.2319010893971509}, {0.2319010893971509, 0.2319010893971509, 0.5132800333608811}, {0.2319010893971509, 0.0229177878448171, 0.5132800333608811}, {0.0229177878448171, 0.5132800333608811, 0.2319010893971509}, {0.5132800333608811, 0.2319010893971509, 0.0229177878448171}, {0.2319010893971509, 0.5132800333608811, 0.0229177878448171}, {0.0229177878448171, 0.2319010893971509, 0.5132800333608811}, {0.5132800333608811, 0.0229177878448171, 0.2319010893971509}, {0.7303134278075384, 0.0379700484718286, 0.0379700484718286}, {0.0379700484718286, 0.7303134278075384, 0.0379700484718286}, {0.0379700484718286, 0.0379700484718286, 0.7303134278075384}, {0.1937464752488044, 0.0379700484718286, 0.0379700484718286}, {0.0379700484718286, 0.1937464752488044, 0.0379700484718286}, {0.0379700484718286, 0.0379700484718286, 0.1937464752488044}, {0.0379700484718286, 0.7303134278075384, 0.1937464752488044}, {0.7303134278075384, 0.1937464752488044, 0.0379700484718286}, {0.1937464752488044, 0.0379700484718286, 0.7303134278075384}, {0.0379700484718286, 
0.1937464752488044, 0.7303134278075384}, {0.7303134278075384, 0.0379700484718286, 0.1937464752488044}, {0.1937464752488044, 0.7303134278075384, 0.0379700484718286}}; std::vector w = {-0.2359620398477557, 0.0244878963560562, 0.0244878963560562, 0.0244878963560562, 0.0244878963560562, 0.0039485206398261, 0.0039485206398261, 0.0039485206398261, 0.0039485206398261, 0.0263055529507371, 0.0263055529507371, 0.0263055529507371, 0.0263055529507371, 0.0263055529507371, 0.0263055529507371, 0.0829803830550589, 0.0829803830550589, 0.0829803830550589, 0.0829803830550589, 0.0829803830550589, 0.0829803830550589, 0.0254426245481023, 0.0254426245481023, 0.0254426245481023, 0.0254426245481023, 0.0254426245481023, 0.0254426245481023, 0.0254426245481023, 0.0254426245481023, 0.0254426245481023, 0.0254426245481023, 0.0254426245481023, 0.0254426245481023, 0.0134324384376852, 0.0134324384376852, 0.0134324384376852, 0.0134324384376852, 0.0134324384376852, 0.0134324384376852, 0.0134324384376852, 0.0134324384376852, 0.0134324384376852, 0.0134324384376852, 0.0134324384376852, 0.0134324384376852}; std::transform(w.cbegin(), w.cend(), w.begin(), [](auto x) { return x / 6.0; }); return {x, w}; } else { const int np = (m + 2) / 2; return quadrature::make_quadrature_tetrahedron_collapsed(np); } } //----------------------------------------------------------------------------- std::pair, std::vector> make_xiao_gimbutas_triangle_quadrature(int m) { if (m == 10) { // Scheme from Xiao Gimbutas, 25 points, degree of precision 10 xt::xtensor x = {{0.3333333333333333, 0.3333333333333333}, {0.4951734598011705, 0.4951734598011705}, {0.019139415242841296, 0.019139415242841296}, {0.18448501268524653, 0.18448501268524653}, {0.42823482094371884, 0.42823482094371884}, {0.4951734598011705, 0.009653080397658997}, {0.019139415242841296, 0.9617211695143174}, {0.18448501268524653, 0.6310299746295069}, {0.42823482094371884, 0.14353035811256232}, {0.009653080397658997, 0.4951734598011705}, {0.9617211695143174, 0.019139415242841296}, {0.6310299746295069, 0.18448501268524653}, {0.14353035811256232, 0.42823482094371884}, {0.03472362048232748, 0.13373475510086913}, {0.03758272734119169, 0.3266931362813369}, {0.8315416244168035, 0.03472362048232748}, {0.6357241363774714, 0.03758272734119169}, {0.13373475510086913, 0.8315416244168035}, {0.3266931362813369, 0.6357241363774714}, {0.13373475510086913, 0.03472362048232748}, {0.3266931362813369, 0.03758272734119169}, {0.8315416244168035, 0.13373475510086913}, {0.6357241363774714, 0.3266931362813369}, {0.03472362048232748, 0.8315416244168035}, {0.03758272734119169, 0.6357241363774714}}; std::vector w = {0.08361487437397393, 0.009792590498418303, 0.006385359230118654, 0.07863376974637727, 0.07524732796854398, 0.009792590498418303, 0.006385359230118654, 0.07863376974637727, 0.07524732796854398, 0.009792590498418303, 0.006385359230118654, 0.07863376974637727, 0.07524732796854398, 0.028962281463256342, 0.038739049086018905, 0.028962281463256342, 0.038739049086018905, 0.028962281463256342, 0.038739049086018905, 0.028962281463256342, 0.038739049086018905, 0.028962281463256342, 0.038739049086018905, 0.028962281463256342, 0.038739049086018905}; std::transform(w.cbegin(), w.cend(), w.begin(), [](auto x) { return x / 2.0; }); return {x, w}; } else if (m == 11) { // Scheme from Xiao Gimbutas, 28 points, degree of precision 11 xt::xtensor x = {{0.3333333333333333, 0.3333333333333333}, {0.030846895635588123, 0.030846895635588123}, {0.49878016517846074, 0.49878016517846074}, {0.11320782728669404, 0.11320782728669404}, 
{0.4366550163931761, 0.4366550163931761}, {0.21448345861926937, 0.21448345861926937}, {0.030846895635588123, 0.9383062087288238}, {0.49878016517846074, 0.0024396696430785125}, {0.11320782728669404, 0.7735843454266119}, {0.4366550163931761, 0.12668996721364778}, {0.21448345861926937, 0.5710330827614613}, {0.9383062087288238, 0.030846895635588123}, {0.0024396696430785125, 0.49878016517846074}, {0.7735843454266119, 0.11320782728669404}, {0.12668996721364778, 0.4366550163931761}, {0.5710330827614613, 0.21448345861926937}, {0.014366662569555624, 0.1593036198376935}, {0.04766406697215078, 0.31063121631346313}, {0.8263297175927509, 0.014366662569555624}, {0.6417047167143861, 0.04766406697215078}, {0.1593036198376935, 0.8263297175927509}, {0.31063121631346313, 0.6417047167143861}, {0.1593036198376935, 0.014366662569555624}, {0.31063121631346313, 0.04766406697215078}, {0.8263297175927509, 0.1593036198376935}, {0.6417047167143861, 0.31063121631346313}, {0.014366662569555624, 0.8263297175927509}, {0.04766406697215078, 0.6417047167143861}}; std::vector w = {0.08144513470935129, 0.012249296950707964, 0.012465491873881381, 0.04012924238130832, 0.06309487215989869, 0.06784510774369515, 0.012249296950707964, 0.012465491873881381, 0.04012924238130832, 0.06309487215989869, 0.06784510774369515, 0.012249296950707964, 0.012465491873881381, 0.04012924238130832, 0.06309487215989869, 0.06784510774369515, 0.014557623337809246, 0.04064284865588647, 0.014557623337809246, 0.04064284865588647, 0.014557623337809246, 0.04064284865588647, 0.014557623337809246, 0.04064284865588647, 0.014557623337809246, 0.04064284865588647, 0.014557623337809246, 0.04064284865588647}; std::transform(w.cbegin(), w.cend(), w.begin(), [](auto x) { return x / 2.0; }); return {x, w}; } else if (m == 12) { // Scheme from Xiao Gimbutas, 33 points, degree of precision 12 xt::xtensor x = {{0.27146250701492614, 0.27146250701492614}, {0.10925782765935432, 0.10925782765935432}, {0.4401116486585931, 0.4401116486585931}, {0.4882037509455415, 0.4882037509455415}, {0.02464636343633564, 0.02464636343633564}, {0.27146250701492614, 0.45707498597014773}, {0.10925782765935432, 0.7814843446812914}, {0.4401116486585931, 0.11977670268281382}, {0.4882037509455415, 0.02359249810891695}, {0.02464636343633564, 0.9507072731273287}, {0.45707498597014773, 0.27146250701492614}, {0.7814843446812914, 0.10925782765935432}, {0.11977670268281382, 0.4401116486585931}, {0.02359249810891695, 0.4882037509455415}, {0.9507072731273287, 0.02464636343633564}, {0.1162960196779266, 0.25545422863851736}, {0.021382490256170623, 0.12727971723358936}, {0.023034156355267166, 0.29165567973834094}, {0.6282497516835561, 0.1162960196779266}, {0.85133779251024, 0.021382490256170623}, {0.6853101639063919, 0.023034156355267166}, {0.25545422863851736, 0.6282497516835561}, {0.12727971723358936, 0.85133779251024}, {0.29165567973834094, 0.6853101639063919}, {0.25545422863851736, 0.1162960196779266}, {0.12727971723358936, 0.021382490256170623}, {0.29165567973834094, 0.023034156355267166}, {0.6282497516835561, 0.25545422863851736}, {0.85133779251024, 0.12727971723358936}, {0.6853101639063919, 0.29165567973834094}, {0.1162960196779266, 0.6282497516835561}, {0.021382490256170623, 0.85133779251024}, {0.023034156355267166, 0.6853101639063919}}; std::vector w = {0.06254121319590276, 0.02848605206887755, 0.04991833492806095, 0.024266838081452035, 0.007931642509973639, 0.06254121319590276, 0.02848605206887755, 0.04991833492806095, 0.024266838081452035, 0.007931642509973639, 0.06254121319590276, 
0.02848605206887755, 0.04991833492806095, 0.024266838081452035, 0.007931642509973639, 0.04322736365941421, 0.015083677576511441, 0.02178358503860756, 0.04322736365941421, 0.015083677576511441, 0.02178358503860756, 0.04322736365941421, 0.015083677576511441, 0.02178358503860756, 0.04322736365941421, 0.015083677576511441, 0.02178358503860756, 0.04322736365941421, 0.015083677576511441, 0.02178358503860756, 0.04322736365941421, 0.015083677576511441, 0.02178358503860756}; std::transform(w.cbegin(), w.cend(), w.begin(), [](auto x) { return x / 2.0; }); return {x, w}; } else if (m == 13) { // Scheme from Xiao Gimbutas, 37 points, degree of precision 13 xt::xtensor x = {{0.3333333333333333, 0.3333333333333333}, {0.4961358947410461, 0.4961358947410461}, {0.4696086896534919, 0.4696086896534919}, {0.23111028494908226, 0.23111028494908226}, {0.4144775702790546, 0.4144775702790546}, {0.11355991257213327, 0.11355991257213327}, {0.024895931491216494, 0.024895931491216494}, {0.4961358947410461, 0.007728210517907841}, {0.4696086896534919, 0.06078262069301621}, {0.23111028494908226, 0.5377794301018355}, {0.4144775702790546, 0.17104485944189085}, {0.11355991257213327, 0.7728801748557335}, {0.024895931491216494, 0.950208137017567}, {0.007728210517907841, 0.4961358947410461}, {0.06078262069301621, 0.4696086896534919}, {0.5377794301018355, 0.23111028494908226}, {0.17104485944189085, 0.4144775702790546}, {0.7728801748557335, 0.11355991257213327}, {0.950208137017567, 0.024895931491216494}, {0.01898800438375904, 0.2920786885766364}, {0.09773603106601653, 0.26674525331035115}, {0.021966344206529244, 0.1267997757838373}, {0.6889333070396046, 0.01898800438375904}, {0.6355187156236324, 0.09773603106601653}, {0.8512338800096335, 0.021966344206529244}, {0.2920786885766364, 0.6889333070396046}, {0.26674525331035115, 0.6355187156236324}, {0.1267997757838373, 0.8512338800096335}, {0.2920786885766364, 0.01898800438375904}, {0.26674525331035115, 0.09773603106601653}, {0.1267997757838373, 0.021966344206529244}, {0.6889333070396046, 0.2920786885766364}, {0.6355187156236324, 0.26674525331035115}, {0.8512338800096335, 0.1267997757838373}, {0.01898800438375904, 0.6889333070396046}, {0.09773603106601653, 0.6355187156236324}, {0.021966344206529244, 0.8512338800096335}}; std::vector w = {0.05162264666429082, 0.009941476361072588, 0.03278124160372298, 0.04606240959277825, 0.0469470955421552, 0.030903097975759793, 0.008029399795258423, 0.009941476361072588, 0.03278124160372298, 0.04606240959277825, 0.0469470955421552, 0.030903097975759793, 0.008029399795258423, 0.009941476361072588, 0.03278124160372298, 0.04606240959277825, 0.0469470955421552, 0.030903097975759793, 0.008029399795258423, 0.01812549864620088, 0.037211960457261536, 0.015393072683782177, 0.01812549864620088, 0.037211960457261536, 0.015393072683782177, 0.01812549864620088, 0.037211960457261536, 0.015393072683782177, 0.01812549864620088, 0.037211960457261536, 0.015393072683782177, 0.01812549864620088, 0.037211960457261536, 0.015393072683782177, 0.01812549864620088, 0.037211960457261536, 0.015393072683782177}; std::transform(w.cbegin(), w.cend(), w.begin(), [](auto x) { return x / 2.0; }); return {x, w}; } else if (m == 14) { // Scheme from Xiao Gimbutas, 42 points, degree of precision 14 xt::xtensor x = { {0.41764471934045394, 0.41764471934045394}, {0.0617998830908727, 0.0617998830908727}, {0.2734775283088387, 0.2734775283088387}, {0.1772055324125435, 0.1772055324125435}, {0.0193909612487011, 0.0193909612487011}, {0.4889639103621786, 0.4889639103621786}, 
{0.41764471934045394, 0.16471056131909212}, {0.0617998830908727, 0.8764002338182546}, {0.2734775283088387, 0.4530449433823226}, {0.1772055324125435, 0.645588935174913}, {0.0193909612487011, 0.9612180775025978}, {0.4889639103621786, 0.022072179275642756}, {0.16471056131909212, 0.41764471934045394}, {0.8764002338182546, 0.0617998830908727}, {0.4530449433823226, 0.2734775283088387}, {0.645588935174913, 0.1772055324125435}, {0.9612180775025978, 0.0193909612487011}, {0.022072179275642756, 0.4889639103621786}, {0.014646950055654471, 0.29837288213625773}, {0.09291624935697185, 0.336861459796345}, {0.05712475740364799, 0.17226668782135557}, {0.001268330932872076, 0.11897449769695682}, {0.6869801678080878, 0.014646950055654471}, {0.5702222908466832, 0.09291624935697185}, {0.7706085547749965, 0.05712475740364799}, {0.8797571713701712, 0.001268330932872076}, {0.29837288213625773, 0.6869801678080878}, {0.336861459796345, 0.5702222908466832}, {0.17226668782135557, 0.7706085547749965}, {0.11897449769695682, 0.8797571713701712}, {0.29837288213625773, 0.014646950055654471}, {0.336861459796345, 0.09291624935697185}, {0.17226668782135557, 0.05712475740364799}, {0.11897449769695682, 0.001268330932872076}, {0.6869801678080878, 0.29837288213625773}, {0.5702222908466832, 0.336861459796345}, {0.7706085547749965, 0.17226668782135557}, {0.8797571713701712, 0.11897449769695682}, {0.014646950055654471, 0.6869801678080878}, {0.09291624935697185, 0.5702222908466832}, {0.05712475740364799, 0.7706085547749965}, {0.001268330932872076, 0.8797571713701712}}; std::vector w = {0.032788353544125355, 0.014433699669776668, 0.051774104507291585, 0.04216258873699302, 0.004923403602400082, 0.021883581369428893, 0.032788353544125355, 0.014433699669776668, 0.051774104507291585, 0.04216258873699302, 0.004923403602400082, 0.021883581369428893, 0.032788353544125355, 0.014433699669776668, 0.051774104507291585, 0.04216258873699302, 0.004923403602400082, 0.021883581369428893, 0.014436308113533842, 0.038571510787060684, 0.024665753212563677, 0.005010228838500672, 0.014436308113533842, 0.038571510787060684, 0.024665753212563677, 0.005010228838500672, 0.014436308113533842, 0.038571510787060684, 0.024665753212563677, 0.005010228838500672, 0.014436308113533842, 0.038571510787060684, 0.024665753212563677, 0.005010228838500672, 0.014436308113533842, 0.038571510787060684, 0.024665753212563677, 0.005010228838500672, 0.014436308113533842, 0.038571510787060684, 0.024665753212563677, 0.005010228838500672}; std::transform(w.cbegin(), w.cend(), w.begin(), [](auto x) { return x / 2.0; }); return {x, w}; } else if (m == 15) { // Scheme from Xiao Gimbutas, 49 points, degree of precision 15 xt::xtensor x = { {0.3333333333333333, 0.3333333333333333}, {0.1299782299330779, 0.1299782299330779}, {0.4600769492970597, 0.4600769492970597}, {0.4916858166302972, 0.4916858166302972}, {0.22153234079514206, 0.22153234079514206}, {0.39693373740906057, 0.39693373740906057}, {0.0563419176961002, 0.0563419176961002}, {0.1299782299330779, 0.7400435401338442}, {0.4600769492970597, 0.07984610140588055}, {0.4916858166302972, 0.016628366739405598}, {0.22153234079514206, 0.5569353184097159}, {0.39693373740906057, 0.20613252518187886}, {0.0563419176961002, 0.8873161646077996}, {0.7400435401338442, 0.1299782299330779}, {0.07984610140588055, 0.4600769492970597}, {0.016628366739405598, 0.4916858166302972}, {0.5569353184097159, 0.22153234079514206}, {0.20613252518187886, 0.39693373740906057}, {0.8873161646077996, 0.0563419176961002}, {0.08459422148219181, 0.18232178340719132}, 
{0.016027089786345473, 0.15020038406523872}, {0.09765044243024235, 0.32311131516371266}, {0.018454251904633165, 0.3079476814836729}, {0.0011135352740137417, 0.03803522930110929}, {0.733083995110617, 0.08459422148219181}, {0.8337725261484158, 0.016027089786345473}, {0.5792382424060449, 0.09765044243024235}, {0.673598066611694, 0.018454251904633165}, {0.960851235424877, 0.0011135352740137417}, {0.18232178340719132, 0.733083995110617}, {0.15020038406523872, 0.8337725261484158}, {0.32311131516371266, 0.5792382424060449}, {0.3079476814836729, 0.673598066611694}, {0.03803522930110929, 0.960851235424877}, {0.18232178340719132, 0.08459422148219181}, {0.15020038406523872, 0.016027089786345473}, {0.32311131516371266, 0.09765044243024235}, {0.3079476814836729, 0.018454251904633165}, {0.03803522930110929, 0.0011135352740137417}, {0.733083995110617, 0.18232178340719132}, {0.8337725261484158, 0.15020038406523872}, {0.5792382424060449, 0.32311131516371266}, {0.673598066611694, 0.3079476814836729}, {0.960851235424877, 0.03803522930110929}, {0.08459422148219181, 0.733083995110617}, {0.016027089786345473, 0.8337725261484158}, {0.09765044243024235, 0.5792382424060449}, {0.018454251904633165, 0.673598066611694}, {0.0011135352740137417, 0.960851235424877}}; std::vector w = {0.02973041974807132, 0.0073975040670461, 0.021594087936438452, 0.0158322763500218, 0.046287286105198076, 0.046336041391207235, 0.015084474247597068, 0.0073975040670461, 0.021594087936438452, 0.0158322763500218, 0.046287286105198076, 0.046336041391207235, 0.015084474247597068, 0.0073975040670461, 0.021594087936438452, 0.0158322763500218, 0.046287286105198076, 0.046336041391207235, 0.015084474247597068, 0.024230008783125607, 0.01122850429887806, 0.03107522047051095, 0.016436762092827895, 0.0024752660145579163, 0.024230008783125607, 0.01122850429887806, 0.03107522047051095, 0.016436762092827895, 0.0024752660145579163, 0.024230008783125607, 0.01122850429887806, 0.03107522047051095, 0.016436762092827895, 0.0024752660145579163, 0.024230008783125607, 0.01122850429887806, 0.03107522047051095, 0.016436762092827895, 0.0024752660145579163, 0.024230008783125607, 0.01122850429887806, 0.03107522047051095, 0.016436762092827895, 0.0024752660145579163, 0.024230008783125607, 0.01122850429887806, 0.03107522047051095, 0.016436762092827895, 0.0024752660145579163}; std::transform(w.cbegin(), w.cend(), w.begin(), [](auto x) { return x / 2.0; }); return {x, w}; } else if (m == 16) { // Scheme from Xiao Gimbutas, 55 points, degree of precision 16 xt::xtensor x = { {0.3333333333333333, 0.3333333333333333}, {0.06667447224023837, 0.06667447224023837}, {0.24132168070137838, 0.24132168070137838}, {0.41279809595522365, 0.41279809595522365}, {0.15006373658703515, 0.15006373658703515}, {0.46954803099668496, 0.46954803099668496}, {0.017041629405718517, 0.017041629405718517}, {0.06667447224023837, 0.8666510555195233}, {0.24132168070137838, 0.5173566385972432}, {0.41279809595522365, 0.1744038080895527}, {0.15006373658703515, 0.6998725268259297}, {0.46954803099668496, 0.060903938006630076}, {0.017041629405718517, 0.965916741188563}, {0.8666510555195233, 0.06667447224023837}, {0.5173566385972432, 0.24132168070137838}, {0.1744038080895527, 0.41279809595522365}, {0.6998725268259297, 0.15006373658703515}, {0.060903938006630076, 0.46954803099668496}, {0.965916741188563, 0.017041629405718517}, {0.009664954403660254, 0.41376948582708517}, {0.030305943355186365, 0.30417944822947973}, {0.010812972776103751, 0.08960908902270585}, {0.10665316053614844, 0.29661537240038294}, 
{0.051354315344013114, 0.16976335515028973}, {0.0036969427073556124, 0.21404877992584728}, {0.5765655597692546, 0.009664954403660254}, {0.6655146084153339, 0.030305943355186365}, {0.8995779382011905, 0.010812972776103751}, {0.5967314670634686, 0.10665316053614844}, {0.7788823295056971, 0.051354315344013114}, {0.7822542773667971, 0.0036969427073556124}, {0.41376948582708517, 0.5765655597692546}, {0.30417944822947973, 0.6655146084153339}, {0.08960908902270585, 0.8995779382011905}, {0.29661537240038294, 0.5967314670634686}, {0.16976335515028973, 0.7788823295056971}, {0.21404877992584728, 0.7822542773667971}, {0.41376948582708517, 0.009664954403660254}, {0.30417944822947973, 0.030305943355186365}, {0.08960908902270585, 0.010812972776103751}, {0.29661537240038294, 0.10665316053614844}, {0.16976335515028973, 0.051354315344013114}, {0.21404877992584728, 0.0036969427073556124}, {0.5765655597692546, 0.41376948582708517}, {0.6655146084153339, 0.30417944822947973}, {0.8995779382011905, 0.08960908902270585}, {0.5967314670634686, 0.29661537240038294}, {0.7788823295056971, 0.16976335515028973}, {0.7822542773667971, 0.21404877992584728}, {0.009664954403660254, 0.5765655597692546}, {0.030305943355186365, 0.6655146084153339}, {0.010812972776103751, 0.8995779382011905}, {0.10665316053614844, 0.5967314670634686}, {0.051354315344013114, 0.7788823295056971}, {0.0036969427073556124, 0.7822542773667971}}; std::vector w = {0.046227910314191344, 0.012425425595561009, 0.04118404106979255, 0.040985219786815366, 0.02878349670274891, 0.02709366946771045, 0.003789135238264222, 0.012425425595561009, 0.04118404106979255, 0.040985219786815366, 0.02878349670274891, 0.02709366946771045, 0.003789135238264222, 0.012425425595561009, 0.04118404106979255, 0.040985219786815366, 0.02878349670274891, 0.02709366946771045, 0.003789135238264222, 0.008182210553222139, 0.013983607124653567, 0.005751869970497159, 0.031646061681983244, 0.017653081047103284, 0.0046146906397291345, 0.008182210553222139, 0.013983607124653567, 0.005751869970497159, 0.031646061681983244, 0.017653081047103284, 0.0046146906397291345, 0.008182210553222139, 0.013983607124653567, 0.005751869970497159, 0.031646061681983244, 0.017653081047103284, 0.0046146906397291345, 0.008182210553222139, 0.013983607124653567, 0.005751869970497159, 0.031646061681983244, 0.017653081047103284, 0.0046146906397291345, 0.008182210553222139, 0.013983607124653567, 0.005751869970497159, 0.031646061681983244, 0.017653081047103284, 0.0046146906397291345, 0.008182210553222139, 0.013983607124653567, 0.005751869970497159, 0.031646061681983244, 0.017653081047103284, 0.0046146906397291345}; std::transform(w.cbegin(), w.cend(), w.begin(), [](auto x) { return x / 2.0; }); return {x, w}; } else if (m == 17) { // Scheme from Xiao Gimbutas, 60 points, degree of precision 17 xt::xtensor x = {{0.4171034443615992, 0.4171034443615992}, {0.18035811626637066, 0.18035811626637066}, {0.2857065024365867, 0.2857065024365867}, {0.06665406347959701, 0.06665406347959701}, {0.014755491660754072, 0.014755491660754072}, {0.46559787161889027, 0.46559787161889027}, {0.4171034443615992, 0.16579311127680163}, {0.18035811626637066, 0.6392837674672587}, {0.2857065024365867, 0.42858699512682663}, {0.06665406347959701, 0.866691873040806}, {0.014755491660754072, 0.9704890166784919}, {0.46559787161889027, 0.06880425676221946}, {0.16579311127680163, 0.4171034443615992}, {0.6392837674672587, 0.18035811626637066}, {0.42858699512682663, 0.2857065024365867}, {0.866691873040806, 0.06665406347959701}, {0.9704890166784919, 
0.014755491660754072}, {0.06880425676221946, 0.46559787161889027}, {0.011575175903180683, 0.07250547079900238}, {0.013229672760086951, 0.41547545929522905}, {0.013135870834002753, 0.27179187005535477}, {0.15750547792686992, 0.29921894247697034}, {0.06734937786736123, 0.3062815917461865}, {0.07804234056828245, 0.16872251349525944}, {0.016017642362119337, 0.15919228747279268}, {0.9159193532978169, 0.011575175903180683}, {0.5712948679446841, 0.013229672760086951}, {0.7150722591106424, 0.013135870834002753}, {0.5432755795961598, 0.15750547792686992}, {0.6263690303864522, 0.06734937786736123}, {0.7532351459364581, 0.07804234056828245}, {0.824790070165088, 0.016017642362119337}, {0.07250547079900238, 0.9159193532978169}, {0.41547545929522905, 0.5712948679446841}, {0.27179187005535477, 0.7150722591106424}, {0.29921894247697034, 0.5432755795961598}, {0.3062815917461865, 0.6263690303864522}, {0.16872251349525944, 0.7532351459364581}, {0.15919228747279268, 0.824790070165088}, {0.07250547079900238, 0.011575175903180683}, {0.41547545929522905, 0.013229672760086951}, {0.27179187005535477, 0.013135870834002753}, {0.29921894247697034, 0.15750547792686992}, {0.3062815917461865, 0.06734937786736123}, {0.16872251349525944, 0.07804234056828245}, {0.15919228747279268, 0.016017642362119337}, {0.9159193532978169, 0.07250547079900238}, {0.5712948679446841, 0.41547545929522905}, {0.7150722591106424, 0.27179187005535477}, {0.5432755795961598, 0.29921894247697034}, {0.6263690303864522, 0.3062815917461865}, {0.7532351459364581, 0.16872251349525944}, {0.824790070165088, 0.15919228747279268}, {0.011575175903180683, 0.9159193532978169}, {0.013229672760086951, 0.5712948679446841}, {0.013135870834002753, 0.7150722591106424}, {0.15750547792686992, 0.5432755795961598}, {0.06734937786736123, 0.6263690303864522}, {0.07804234056828245, 0.7532351459364581}, {0.016017642362119337, 0.824790070165088}}; std::vector w = { 0.027310926528102106, 0.026312630588017985, 0.03771623715279528, 0.012459000802305444, 0.002773887577637642, 0.02501945095049736, 0.027310926528102106, 0.026312630588017985, 0.03771623715279528, 0.012459000802305444, 0.002773887577637642, 0.02501945095049736, 0.027310926528102106, 0.026312630588017985, 0.03771623715279528, 0.012459000802305444, 0.002773887577637642, 0.02501945095049736, 0.004584348401735868, 0.010398439955839537, 0.008692214501001192, 0.02617162593533699, 0.022487772546691067, 0.02055789832045452, 0.007978300205929593, 0.004584348401735868, 0.010398439955839537, 0.008692214501001192, 0.02617162593533699, 0.022487772546691067, 0.02055789832045452, 0.007978300205929593, 0.004584348401735868, 0.010398439955839537, 0.008692214501001192, 0.02617162593533699, 0.022487772546691067, 0.02055789832045452, 0.007978300205929593, 0.004584348401735868, 0.010398439955839537, 0.008692214501001192, 0.02617162593533699, 0.022487772546691067, 0.02055789832045452, 0.007978300205929593, 0.004584348401735868, 0.010398439955839537, 0.008692214501001192, 0.02617162593533699, 0.022487772546691067, 0.02055789832045452, 0.007978300205929593, 0.004584348401735868, 0.010398439955839537, 0.008692214501001192, 0.02617162593533699, 0.022487772546691067, 0.02055789832045452, 0.007978300205929593}; std::transform(w.cbegin(), w.cend(), w.begin(), [](auto x) { return x / 2.0; }); return {x, w}; } else if (m == 18) { // Scheme from Xiao Gimbutas, 67 points, degree of precision 18 xt::xtensor x = { {0.3333333333333333, 0.3333333333333333}, {0.4749182113240457, 0.4749182113240457}, {0.15163850697260495, 0.15163850697260495}, 
{0.4110671018759195, 0.4110671018759195}, {0.2656146099053742, 0.2656146099053742}, {0.0037589443410684376, 0.0037589443410684376}, {0.072438705567333, 0.072438705567333}, {0.4749182113240457, 0.05016357735190857}, {0.15163850697260495, 0.6967229860547901}, {0.4110671018759195, 0.177865796248161}, {0.2656146099053742, 0.46877078018925156}, {0.0037589443410684376, 0.9924821113178631}, {0.072438705567333, 0.855122588865334}, {0.05016357735190857, 0.4749182113240457}, {0.6967229860547901, 0.15163850697260495}, {0.177865796248161, 0.4110671018759195}, {0.46877078018925156, 0.2656146099053742}, {0.9924821113178631, 0.0037589443410684376}, {0.855122588865334, 0.072438705567333}, {0.09042704035434063, 0.3850440344131637}, {0.012498932483495477, 0.04727614183265175}, {0.05401173533902428, 0.30206195771287075}, {0.010505018819241962, 0.2565061597742415}, {0.06612245802840343, 0.17847912556588763}, {0.14906691012577386, 0.2685733063960138}, {0.011691824674667157, 0.41106566867461836}, {0.014331524778941987, 0.1327788302713893}, {0.5245289252324957, 0.09042704035434063}, {0.9402249256838529, 0.012498932483495477}, {0.6439263069481049, 0.05401173533902428}, {0.7329888214065166, 0.010505018819241962}, {0.7553984164057089, 0.06612245802840343}, {0.5823597834782124, 0.14906691012577386}, {0.5772425066507145, 0.011691824674667157}, {0.8528896449496688, 0.014331524778941987}, {0.3850440344131637, 0.5245289252324957}, {0.04727614183265175, 0.9402249256838529}, {0.30206195771287075, 0.6439263069481049}, {0.2565061597742415, 0.7329888214065166}, {0.17847912556588763, 0.7553984164057089}, {0.2685733063960138, 0.5823597834782124}, {0.41106566867461836, 0.5772425066507145}, {0.1327788302713893, 0.8528896449496688}, {0.3850440344131637, 0.09042704035434063}, {0.04727614183265175, 0.012498932483495477}, {0.30206195771287075, 0.05401173533902428}, {0.2565061597742415, 0.010505018819241962}, {0.17847912556588763, 0.06612245802840343}, {0.2685733063960138, 0.14906691012577386}, {0.41106566867461836, 0.011691824674667157}, {0.1327788302713893, 0.014331524778941987}, {0.5245289252324957, 0.3850440344131637}, {0.9402249256838529, 0.04727614183265175}, {0.6439263069481049, 0.30206195771287075}, {0.7329888214065166, 0.2565061597742415}, {0.7553984164057089, 0.17847912556588763}, {0.5823597834782124, 0.2685733063960138}, {0.5772425066507145, 0.41106566867461836}, {0.8528896449496688, 0.1327788302713893}, {0.09042704035434063, 0.5245289252324957}, {0.012498932483495477, 0.9402249256838529}, {0.05401173533902428, 0.6439263069481049}, {0.010505018819241962, 0.7329888214065166}, {0.06612245802840343, 0.7553984164057089}, {0.14906691012577386, 0.5823597834782124}, {0.011691824674667157, 0.5772425066507145}, {0.014331524778941987, 0.8528896449496688}}; std::vector w = { 0.03074852123911586, 0.013107027491738756, 0.0203183388454584, 0.0334719940598479, 0.031116396602006133, 0.0005320056169477806, 0.013790286604766942, 0.013107027491738756, 0.0203183388454584, 0.0334719940598479, 0.031116396602006133, 0.0005320056169477806, 0.013790286604766942, 0.013107027491738756, 0.0203183388454584, 0.0334719940598479, 0.031116396602006133, 0.0005320056169477806, 0.013790286604766942, 0.015328258194553142, 0.004217516774744443, 0.016365908413986566, 0.007729835280006227, 0.01691165391748008, 0.02759288648857948, 0.009586124474361505, 0.007641704972719637, 0.015328258194553142, 0.004217516774744443, 0.016365908413986566, 0.007729835280006227, 0.01691165391748008, 0.02759288648857948, 0.009586124474361505, 0.007641704972719637, 
0.015328258194553142, 0.004217516774744443, 0.016365908413986566, 0.007729835280006227, 0.01691165391748008, 0.02759288648857948, 0.009586124474361505, 0.007641704972719637, 0.015328258194553142, 0.004217516774744443, 0.016365908413986566, 0.007729835280006227, 0.01691165391748008, 0.02759288648857948, 0.009586124474361505, 0.007641704972719637, 0.015328258194553142, 0.004217516774744443, 0.016365908413986566, 0.007729835280006227, 0.01691165391748008, 0.02759288648857948, 0.009586124474361505, 0.007641704972719637, 0.015328258194553142, 0.004217516774744443, 0.016365908413986566, 0.007729835280006227, 0.01691165391748008, 0.02759288648857948, 0.009586124474361505, 0.007641704972719637}; std::transform(w.cbegin(), w.cend(), w.begin(), [](auto x) { return x / 2.0; }); return {x, w}; } else if (m == 19) { // Scheme from Xiao Gimbutas, 73 points, degree of precision xt::xtensor x = { {0.3333333333333333, 0.3333333333333333}, {0.05252627985410363, 0.05252627985410363}, {0.11144805571699878, 0.11144805571699878}, {0.011639027327922657, 0.011639027327922657}, {0.25516213315312486, 0.25516213315312486}, {0.4039697179663861, 0.4039697179663861}, {0.17817100607962755, 0.17817100607962755}, {0.4591943889568276, 0.4591943889568276}, {0.4925124498658742, 0.4925124498658742}, {0.05252627985410363, 0.8949474402917927}, {0.11144805571699878, 0.7771038885660024}, {0.011639027327922657, 0.9767219453441547}, {0.25516213315312486, 0.4896757336937503}, {0.4039697179663861, 0.19206056406722782}, {0.17817100607962755, 0.6436579878407449}, {0.4591943889568276, 0.08161122208634475}, {0.4925124498658742, 0.014975100268251551}, {0.8949474402917927, 0.05252627985410363}, {0.7771038885660024, 0.11144805571699878}, {0.9767219453441547, 0.011639027327922657}, {0.4896757336937503, 0.25516213315312486}, {0.19206056406722782, 0.4039697179663861}, {0.6436579878407449, 0.17817100607962755}, {0.08161122208634475, 0.4591943889568276}, {0.014975100268251551, 0.4925124498658742}, {0.005005142352350433, 0.1424222825711269}, {0.009777061438676854, 0.06008389996270236}, {0.039142449434608845, 0.13070066996053453}, {0.129312809767979, 0.31131838322398686}, {0.07456118930435514, 0.22143394188911344}, {0.04088831446497813, 0.3540259269997119}, {0.014923638907438481, 0.24189410400689262}, {0.0020691038491023883, 0.36462041433871}, {0.8525725750765227, 0.005005142352350433}, {0.9301390385986208, 0.009777061438676854}, {0.8301568806048566, 0.039142449434608845}, {0.5593688070080342, 0.129312809767979}, {0.7040048688065313, 0.07456118930435514}, {0.60508575853531, 0.04088831446497813}, {0.7431822570856689, 0.014923638907438481}, {0.6333104818121875, 0.0020691038491023883}, {0.1424222825711269, 0.8525725750765227}, {0.06008389996270236, 0.9301390385986208}, {0.13070066996053453, 0.8301568806048566}, {0.31131838322398686, 0.5593688070080342}, {0.22143394188911344, 0.7040048688065313}, {0.3540259269997119, 0.60508575853531}, {0.24189410400689262, 0.7431822570856689}, {0.36462041433871, 0.6333104818121875}, {0.1424222825711269, 0.005005142352350433}, {0.06008389996270236, 0.009777061438676854}, {0.13070066996053453, 0.039142449434608845}, {0.31131838322398686, 0.129312809767979}, {0.22143394188911344, 0.07456118930435514}, {0.3540259269997119, 0.04088831446497813}, {0.24189410400689262, 0.014923638907438481}, {0.36462041433871, 0.0020691038491023883}, {0.8525725750765227, 0.1424222825711269}, {0.9301390385986208, 0.06008389996270236}, {0.8301568806048566, 0.13070066996053453}, {0.5593688070080342, 0.31131838322398686}, {0.7040048688065313, 
0.22143394188911344}, {0.60508575853531, 0.3540259269997119}, {0.7431822570856689, 0.24189410400689262}, {0.6333104818121875, 0.36462041433871}, {0.005005142352350433, 0.8525725750765227}, {0.009777061438676854, 0.9301390385986208}, {0.039142449434608845, 0.8301568806048566}, {0.129312809767979, 0.5593688070080342}, {0.07456118930435514, 0.7040048688065313}, {0.04088831446497813, 0.60508575853531}, {0.014923638907438481, 0.7431822570856689}, {0.0020691038491023883, 0.6333104818121875} }; std::vector w = {0.034469160850905275, 0.007109393622794947, 0.015234956517004836, 0.0017651924183085402, 0.03175285458752998, 0.03153735864523962, 0.02465198105358483, 0.022983570977123252, 0.010321882182418864, 0.007109393622794947, 0.015234956517004836, 0.0017651924183085402, 0.03175285458752998, 0.03153735864523962, 0.02465198105358483, 0.022983570977123252, 0.010321882182418864, 0.007109393622794947, 0.015234956517004836, 0.0017651924183085402, 0.03175285458752998, 0.03153735864523962, 0.02465198105358483, 0.022983570977123252, 0.010321882182418864, 0.0029256924878800715, 0.0033273888405939045, 0.009695519081624202, 0.026346264707445364, 0.018108074590430505, 0.016102209460939428, 0.00845592483909348, 0.0032821375148397378, 0.0029256924878800715, 0.0033273888405939045, 0.009695519081624202, 0.026346264707445364, 0.018108074590430505, 0.016102209460939428, 0.00845592483909348, 0.0032821375148397378, 0.0029256924878800715, 0.0033273888405939045, 0.009695519081624202, 0.026346264707445364, 0.018108074590430505, 0.016102209460939428, 0.00845592483909348, 0.0032821375148397378, 0.0029256924878800715, 0.0033273888405939045, 0.009695519081624202, 0.026346264707445364, 0.018108074590430505, 0.016102209460939428, 0.00845592483909348, 0.0032821375148397378, 0.0029256924878800715, 0.0033273888405939045, 0.009695519081624202, 0.026346264707445364, 0.018108074590430505, 0.016102209460939428, 0.00845592483909348, 0.0032821375148397378, 0.0029256924878800715, 0.0033273888405939045, 0.009695519081624202, 0.026346264707445364, 0.018108074590430505, 0.016102209460939428, 0.00845592483909348, 0.0032821375148397378}; std::transform(w.cbegin(), w.cend(), w.begin(), [](auto x) { return x / 2.0; }); return {x, w}; } else if (m == 20) { // Scheme from Xiao Gimbutas, 79 points, degree of precision 20 xt::xtensor x = { {0.3333333333333333, 0.3333333333333333}, {0.18629499774454095, 0.18629499774454095}, {0.037310880598884766, 0.037310880598884766}, {0.476245611540499, 0.476245611540499}, {0.4455510569559248, 0.4455510569559248}, {0.25457926767333916, 0.25457926767333916}, {0.39342534781709987, 0.39342534781709987}, {0.01097614102839789, 0.01097614102839789}, {0.10938359671171471, 0.10938359671171471}, {0.18629499774454095, 0.6274100045109181}, {0.037310880598884766, 0.9253782388022305}, {0.476245611540499, 0.047508776919002016}, {0.4455510569559248, 0.10889788608815043}, {0.25457926767333916, 0.4908414646533217}, {0.39342534781709987, 0.21314930436580026}, {0.01097614102839789, 0.9780477179432042}, {0.10938359671171471, 0.7812328065765706}, {0.6274100045109181, 0.18629499774454095}, {0.9253782388022305, 0.037310880598884766}, {0.047508776919002016, 0.476245611540499}, {0.10889788608815043, 0.4455510569559248}, {0.4908414646533217, 0.25457926767333916}, {0.21314930436580026, 0.39342534781709987}, {0.9780477179432042, 0.01097614102839789}, {0.7812328065765706, 0.10938359671171471}, {0.004854937607623827, 0.06409058560843404}, {0.10622720472027006, 0.2156070573900944}, {0.007570780504696579, 0.15913370765706722}, 
{0.13980807199179993, 0.317860123835772}, {0.04656036490766434, 0.19851813222878817}, {0.038363684775374655, 0.09995229628813862}, {0.009831548292802588, 0.42002375881622406}, {0.05498747914298685, 0.33313481730958744}, {0.01073721285601111, 0.2805814114236652}, {0.9310544767839422, 0.004854937607623827}, {0.6781657378896355, 0.10622720472027006}, {0.8332955118382361, 0.007570780504696579}, {0.5423318041724281, 0.13980807199179993}, {0.7549215028635474, 0.04656036490766434}, {0.8616840189364867, 0.038363684775374655}, {0.5701446928909732, 0.009831548292802588}, {0.6118777035474257, 0.05498747914298685}, {0.7086813757203236, 0.01073721285601111}, {0.06409058560843404, 0.9310544767839422}, {0.2156070573900944, 0.6781657378896355}, {0.15913370765706722, 0.8332955118382361}, {0.317860123835772, 0.5423318041724281}, {0.19851813222878817, 0.7549215028635474}, {0.09995229628813862, 0.8616840189364867}, {0.42002375881622406, 0.5701446928909732}, {0.33313481730958744, 0.6118777035474257}, {0.2805814114236652, 0.7086813757203236}, {0.06409058560843404, 0.004854937607623827}, {0.2156070573900944, 0.10622720472027006}, {0.15913370765706722, 0.007570780504696579}, {0.317860123835772, 0.13980807199179993}, {0.19851813222878817, 0.04656036490766434}, {0.09995229628813862, 0.038363684775374655}, {0.42002375881622406, 0.009831548292802588}, {0.33313481730958744, 0.05498747914298685}, {0.2805814114236652, 0.01073721285601111}, {0.9310544767839422, 0.06409058560843404}, {0.6781657378896355, 0.2156070573900944}, {0.8332955118382361, 0.15913370765706722}, {0.5423318041724281, 0.317860123835772}, {0.7549215028635474, 0.19851813222878817}, {0.8616840189364867, 0.09995229628813862}, {0.5701446928909732, 0.42002375881622406}, {0.6118777035474257, 0.33313481730958744}, {0.7086813757203236, 0.2805814114236652}, {0.004854937607623827, 0.9310544767839422}, {0.10622720472027006, 0.6781657378896355}, {0.007570780504696579, 0.8332955118382361}, {0.13980807199179993, 0.5423318041724281}, {0.04656036490766434, 0.7549215028635474}, {0.038363684775374655, 0.8616840189364867}, {0.009831548292802588, 0.5701446928909732}, {0.05498747914298685, 0.6118777035474257}, {0.01073721285601111, 0.7086813757203236}}; std::vector w = { 0.027820221402906232, 0.01834692594850583, 0.0043225508213311555, 0.014203650606816881, 0.018904799866464896, 0.028166402615040498, 0.027576101258140917, 0.00159768158213324, 0.01566046155214907, 0.01834692594850583, 0.0043225508213311555, 0.014203650606816881, 0.018904799866464896, 0.028166402615040498, 0.027576101258140917, 0.00159768158213324, 0.01566046155214907, 0.01834692594850583, 0.0043225508213311555, 0.014203650606816881, 0.018904799866464896, 0.028166402615040498, 0.027576101258140917, 0.00159768158213324, 0.01566046155214907, 0.002259739204251731, 0.015445215644198462, 0.004405794837116996, 0.02338349146365547, 0.01197279715790938, 0.008291423055227716, 0.007391363000510596, 0.01733445113443867, 0.007156400476915371, 0.002259739204251731, 0.015445215644198462, 0.004405794837116996, 0.02338349146365547, 0.01197279715790938, 0.008291423055227716, 0.007391363000510596, 0.01733445113443867, 0.007156400476915371, 0.002259739204251731, 0.015445215644198462, 0.004405794837116996, 0.02338349146365547, 0.01197279715790938, 0.008291423055227716, 0.007391363000510596, 0.01733445113443867, 0.007156400476915371, 0.002259739204251731, 0.015445215644198462, 0.004405794837116996, 0.02338349146365547, 0.01197279715790938, 0.008291423055227716, 0.007391363000510596, 0.01733445113443867, 0.007156400476915371, 
0.002259739204251731, 0.015445215644198462, 0.004405794837116996, 0.02338349146365547, 0.01197279715790938, 0.008291423055227716, 0.007391363000510596, 0.01733445113443867, 0.007156400476915371, 0.002259739204251731, 0.015445215644198462, 0.004405794837116996, 0.02338349146365547, 0.01197279715790938, 0.008291423055227716, 0.007391363000510596, 0.01733445113443867, 0.007156400476915371}; std::transform(w.cbegin(), w.cend(), w.begin(), [](auto x) { return x / 2.0; }); return {x, w}; } else { throw std::runtime_error("Xiao-Gimbutas not implemented for this order."); } } //----------------------------------------------------------------------------- std::pair, std::vector> make_default_triangle_quadrature(int m) { if (m == 0 or m == 1) { // Scheme from Zienkiewicz and Taylor, 1 point, degree of precision 1 return {{{1.0 / 3.0, 1.0 / 3.0}}, {0.5}}; } else if (m == 2) { // Scheme from Strang and Fix, 3 points, degree of precision 2 xt::xtensor x = { {1.0 / 6.0, 1.0 / 6.0}, {1.0 / 6.0, 2.0 / 3.0}, {2.0 / 3.0, 1.0 / 6.0}}; return {x, {1.0 / 6.0, 1.0 / 6.0, 1.0 / 6.0}}; } else if (m == 3) { // Scheme from Strang and Fix, 6 points, degree of precision 3 xt::xtensor x = {{0.659027622374092, 0.231933368553031}, {0.659027622374092, 0.109039009072877}, {0.231933368553031, 0.659027622374092}, {0.231933368553031, 0.109039009072877}, {0.109039009072877, 0.659027622374092}, {0.109039009072877, 0.231933368553031}}; std::vector w(6, 1.0 / 12.0); return {x, w}; } else if (m == 4) { // Scheme from Strang and Fix, 6 points, degree of precision 4 xt::xtensor x = {{0.816847572980459, 0.091576213509771}, {0.091576213509771, 0.816847572980459}, {0.091576213509771, 0.091576213509771}, {0.108103018168070, 0.445948490915965}, {0.445948490915965, 0.108103018168070}, {0.445948490915965, 0.445948490915965}}; std::vector w = {0.109951743655322, 0.109951743655322, 0.109951743655322, 0.223381589678011, 0.223381589678011, 0.223381589678011}; std::transform(w.cbegin(), w.cend(), w.begin(), [](auto x) { return 0.5 * x; }); return {x, w}; } else if (m == 5) { // Scheme from Strang and Fix, 7 points, degree of precision 5 xt::xtensor x = {{0.33333333333333333, 0.33333333333333333}, {0.79742698535308720, 0.10128650732345633}, {0.10128650732345633, 0.79742698535308720}, {0.10128650732345633, 0.10128650732345633}, {0.05971587178976981, 0.47014206410511505}, {0.47014206410511505, 0.05971587178976981}, {0.47014206410511505, 0.47014206410511505}}; std::vector w = {0.22500000000000000, 0.12593918054482717, 0.12593918054482717, 0.12593918054482717, 0.13239415278850616, 0.13239415278850616, 0.13239415278850616}; std::transform(w.cbegin(), w.cend(), w.begin(), [](auto x) { return 0.5 * x; }); return {x, w}; } else if (m == 6) { // Scheme from Strang and Fix, 12 points, degree of precision 6 xt::xtensor x = {{0.873821971016996, 0.063089014491502}, {0.063089014491502, 0.873821971016996}, {0.063089014491502, 0.063089014491502}, {0.501426509658179, 0.249286745170910}, {0.249286745170910, 0.501426509658179}, {0.249286745170910, 0.249286745170910}, {0.636502499121399, 0.310352451033785}, {0.636502499121399, 0.053145049844816}, {0.310352451033785, 0.636502499121399}, {0.310352451033785, 0.053145049844816}, {0.053145049844816, 0.636502499121399}, {0.053145049844816, 0.310352451033785}}; std::vector w = {0.050844906370207, 0.050844906370207, 0.050844906370207, 0.116786275726379, 0.116786275726379, 0.116786275726379, 0.082851075618374, 0.082851075618374, 0.082851075618374, 0.082851075618374, 0.082851075618374, 0.082851075618374}; 
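    // Illustrative (non-library) sketch: one way to sanity-check a hard-coded
    // triangle rule is to integrate the monomials x^p * y^q over the reference
    // triangle and compare against the exact value p! q! / (p + q + 2)!.
    // The helper below is hypothetical (not part of this file), assumes
    // <cmath> and a loose tolerance, and is shown in comments only:
    //
    //   bool check_triangle_rule(const xt::xtensor<double, 2>& x,
    //                            const std::vector<double>& w, int degree)
    //   {
    //     for (int p = 0; p <= degree; ++p)
    //       for (int q = 0; q <= degree - p; ++q)
    //       {
    //         double I = 0.0;
    //         for (std::size_t i = 0; i < w.size(); ++i)
    //           I += w[i] * std::pow(x(i, 0), p) * std::pow(x(i, 1), q);
    //         const double exact = std::tgamma(p + 1) * std::tgamma(q + 1)
    //                              / std::tgamma(p + q + 3);
    //         if (std::abs(I - exact) > 1e-10)
    //           return false;
    //       }
    //     return true;
    //   }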
    std::transform(w.cbegin(), w.cend(), w.begin(),
                   [](auto x) { return 0.5 * x; });
    return {x, w};
  }
  else if (m >= 10 and m <= 20)
  {
    return make_xiao_gimbutas_triangle_quadrature(m);
  }
  else
  {
    const int np = (m + 2) / 2;
    return quadrature::make_quadrature_triangle_collapsed(np);
  }
}
} // namespace
//-----------------------------------------------------------------------------
xt::xtensor<double, 2>
quadrature::compute_jacobi_deriv(double a, std::size_t n, std::size_t nderiv,
                                 const xtl::span<const double>& x)
{
  std::vector<std::size_t> shape = {x.size()};
  const auto _x = xt::adapt(x.data(), x.size(), xt::no_ownership(), shape);
  xt::xtensor<double, 3> J({nderiv + 1, n + 1, x.size()});
  xt::xtensor<double, 2> Jd({n + 1, x.size()});
  for (std::size_t i = 0; i < nderiv + 1; ++i)
  {
    if (i == 0)
      xt::row(Jd, 0) = 1.0;
    else
      xt::row(Jd, 0) = 0.0;

    if (n > 0)
    {
      if (i == 0)
        xt::row(Jd, 1) = (_x * (a + 2.0) + a) * 0.5;
      else if (i == 1)
        xt::row(Jd, 1) = a * 0.5 + 1;
      else
        xt::row(Jd, 1) = 0.0;
    }

    for (std::size_t k = 2; k < n + 1; ++k)
    {
      const double a1 = 2 * k * (k + a) * (2 * k + a - 2);
      const double a2 = (2 * k + a - 1) * (a * a) / a1;
      const double a3 = (2 * k + a - 1) * (2 * k + a) / (2 * k * (k + a));
      const double a4 = 2 * (k + a - 1) * (k - 1) * (2 * k + a) / a1;
      xt::row(Jd, k)
          = xt::row(Jd, k - 1) * (_x * a3 + a2) - xt::row(Jd, k - 2) * a4;
      if (i > 0)
        xt::row(Jd, k) += i * a3 * xt::view(J, i - 1, k - 1, xt::all());
    }

    // Note: using assign, instead of copy assignment, to get around an
    // xtensor bug with Intel Compilers
    // https://github.com/xtensor-stack/xtensor/issues/2351
    auto J_view = xt::view(J, i, xt::all(), xt::all());
    J_view.assign(Jd);
  }

  xt::xtensor<double, 2> result({nderiv + 1, x.size()});
  for (std::size_t i = 0; i < nderiv + 1; ++i)
    xt::row(result, i) = xt::view(J, i, n, xt::all());

  return result;
}
//-----------------------------------------------------------------------------
std::vector<double> quadrature::compute_gauss_jacobi_points(double a, int m)
{
  /// Computes the m roots of \f$P_{m}^{a,0}\f$ on [-1,1] by Newton's method.
  /// The initial guesses are the Chebyshev points. Algorithm
  /// implemented from the pseudocode given by Karniadakis and
  /// Sherwin

  const double eps = 1.e-8;
  const int max_iter = 100;
  std::vector<double> x(m);
  for (int k = 0; k < m; ++k)
  {
    // Initial guess
    x[k] = -cos((2.0 * k + 1.0) * M_PI / (2.0 * m));
    if (k > 0)
      x[k] = 0.5 * (x[k] + x[k - 1]);

    int j = 0;
    while (j < max_iter)
    {
      double s = 0;
      for (int i = 0; i < k; ++i)
        s += 1.0 / (x[k] - x[i]);
      xtl::span<const double> _x(&x[k], 1);
      const xt::xtensor<double, 2> f
          = quadrature::compute_jacobi_deriv(a, m, 1, _x);
      const double delta = f(0, 0) / (f(1, 0) - f(0, 0) * s);
      x[k] -= delta;

      if (std::abs(delta) < eps)
        break;
      ++j;
    }
  }

  return x;
}
//-----------------------------------------------------------------------------
std::pair<xt::xarray<double>, std::vector<double>>
quadrature::compute_gauss_jacobi_rule(double a, int m)
{
  /// @note Computes on [-1, 1]
  std::vector<double> _pts = quadrature::compute_gauss_jacobi_points(a, m);
  auto pts = xt::adapt(_pts);
  const xt::xtensor<double, 1> Jd
      = xt::row(quadrature::compute_jacobi_deriv(a, m, 1, pts), 1);
  const double a1 = std::pow(2.0, a + 1.0);
  std::vector<double> wts(m);
  for (int i = 0; i < m; ++i)
  {
    const double x = pts[i];
    const double f = Jd[i];
    wts[i] = a1 / (1.0 - x * x) / (f * f);
  }

  return {pts, wts};
}
//-----------------------------------------------------------------------------
std::pair<xt::xarray<double>, std::vector<double>>
quadrature::compute_gll_rule(int m)
{
  // Implement the Gauss-Lobatto-Legendre quadrature rules on the interval
  // using Greg von Winckel's implementation. This facilitates implementing
  // spectral elements.
  // The quadrature rule uses m points for a degree of precision of 2m-3.
  if (m < 2)
  {
    throw std::runtime_error(
        "Gauss-Lobatto-Legendre quadrature invalid for fewer than 2 points");
  }

  // Calculate the recursion coefficients
  auto [alpha, beta] = rec_jacobi(m, 0.0, 0.0);

  // Compute Lobatto nodes and weights
  auto [xs_ref, ws_ref] = lobatto(alpha, beta, -1.0, 1.0);

  return {xt::adapt(xs_ref), ws_ref};
}
//-----------------------------------------------------------------------------
std::pair<xt::xarray<double>, std::vector<double>>
quadrature::make_quadrature_line(int m)
{
  auto [ptx, wx] = quadrature::compute_gauss_jacobi_rule(0.0, m);
  std::transform(wx.begin(), wx.end(), wx.begin(),
                 [](auto x) { return 0.5 * x; });
  return {0.5 * (ptx + 1.0), wx};
}
//-----------------------------------------------------------------------------
std::pair<xt::xarray<double>, std::vector<double>>
quadrature::make_gll_line(int m)
{
  auto [ptx, wx] = quadrature::compute_gll_rule(m);
  std::transform(wx.begin(), wx.end(), wx.begin(),
                 [](auto x) { return 0.5 * x; });
  return {0.5 * (ptx + 1.0), wx};
}
//-----------------------------------------------------------------------------
std::pair<xt::xtensor<double, 2>, std::vector<double>>
quadrature::make_quadrature_triangle_collapsed(std::size_t m)
{
  auto [ptx, wx] = quadrature::compute_gauss_jacobi_rule(0.0, m);
  auto [pty, wy] = quadrature::compute_gauss_jacobi_rule(1.0, m);

  xt::xtensor<double, 2> pts({m * m, 2});
  std::vector<double> wts(m * m);
  int c = 0;
  for (std::size_t i = 0; i < m; ++i)
  {
    for (std::size_t j = 0; j < m; ++j)
    {
      pts(c, 0) = 0.25 * (1.0 + ptx[i]) * (1.0 - pty[j]);
      pts(c, 1) = 0.5 * (1.0 + pty[j]);
      wts[c] = wx[i] * wy[j] * 0.125;
      ++c;
    }
  }

  return {pts, wts};
}
//-----------------------------------------------------------------------------
std::pair<xt::xtensor<double, 2>, std::vector<double>>
quadrature::make_quadrature_tetrahedron_collapsed(std::size_t m)
{
  auto [ptx, wx] = quadrature::compute_gauss_jacobi_rule(0.0, m);
  auto [pty, wy] = quadrature::compute_gauss_jacobi_rule(1.0, m);
  auto [ptz, wz] = quadrature::compute_gauss_jacobi_rule(2.0, m);

  xt::xtensor<double, 2> pts({m * m * m, 3});
  std::vector<double> wts(m * m * m);
  int c = 0;
  for (std::size_t i = 0; i < m; ++i)
  {
    for (std::size_t j = 0; j < m; ++j)
    {
      for (std::size_t k = 0; k < m; ++k)
      {
        pts(c, 0) = 0.125 * (1.0 + ptx[i]) * (1.0 - pty[j]) * (1.0 - ptz[k]);
        pts(c, 1) = 0.25 * (1. + pty[j]) * (1.
- ptz[k]); pts(c, 2) = 0.5 * (1.0 + ptz[k]); wts[c] = wx[i] * wy[j] * wz[k] * 0.125 * 0.125; ++c; } } } return {pts, wts}; } //----------------------------------------------------------------------------- std::pair, std::vector> quadrature::make_quadrature(const std::string& rule, cell::type celltype, int m) { if (rule == "" or rule == "default") { if (celltype == cell::type::triangle) return make_default_triangle_quadrature(m); else if (celltype == cell::type::tetrahedron) return make_default_tetrahedron_quadrature(m); else { const int np = (m + 2) / 2; return make_gauss_jacobi_quadrature(celltype, np); } } else if (rule == "Gauss-Jacobi") { const int np = (m + 2) / 2; return make_gauss_jacobi_quadrature(celltype, np); } else if (rule == "GLL") { const int np = (m + 4) / 2; return make_gll_quadrature(celltype, np); } else if (rule == "Xiao-Gimbutas") { if (celltype == cell::type::triangle) return make_xiao_gimbutas_triangle_quadrature(m); else throw std::runtime_error( "Xiao-Gimbutas is only implemented for triangles."); } else throw std::runtime_error("Unknown quadrature rule \"" + rule + "\""); } //----------------------------------------------------------------------------- basix-0.3.0/cpp/basix/quadrature.h000066400000000000000000000056031411115224000170310ustar00rootroot00000000000000// Copyright (c) 2020 Chris Richardson // FEniCS Project // SPDX-License-Identifier: MIT #pragma once #include "cell.h" #include #include #include #include /// basix /// Integration using Gauss-Jacobi quadrature on simplices. Other shapes /// can be obtained by using a product. /// @todo - pyramid namespace basix::quadrature { /// Evaluate the nth Jacobi polynomial and derivatives with weight /// parameters (a, 0) at points x /// @param[in] a Jacobi weight a /// @param[in] n Order of polynomial /// @param[in] nderiv Number of derivatives (if zero, just compute /// polynomial itself) /// @param[in] x Points at which to evaluate /// @returns Array of polynomial derivative values (rows) at points /// (columns) xt::xtensor compute_jacobi_deriv(double a, std::size_t n, std::size_t nderiv, const xtl::span& x); // Computes Gauss-Jacobi quadrature points /// Finds the m roots of \f$P_{m}^{a,0}\f$ on [-1,1] by Newton's method. /// @param[in] a weight in Jacobi (b=0) /// @param[in] m order /// @return list of points in 1D std::vector compute_gauss_jacobi_points(double a, int m); /// Gauss-Jacobi quadrature rule (points and weights) std::pair, std::vector> compute_gauss_jacobi_rule(double a, int m); /// Compute line quadrature rule on [0, 1] /// @param m order /// @returns list of points, list of weights std::pair, std::vector> make_quadrature_line(int m); /// Compute triangle quadrature rule on [0, 1]x[0, 1] /// @param[in] m order /// @returns list of points, list of weights std::pair, std::vector> make_quadrature_triangle_collapsed(std::size_t m); /// Compute tetrahedron quadrature rule on [0, 1]x[0, 1]x[0, 1] /// @param[in] m order /// @returns List of points, list of weights. The number of points /// arrays has shape (num points, gdim) std::pair, std::vector> make_quadrature_tetrahedron_collapsed(std::size_t m); /// Utility for quadrature rule on reference cell /// @param[in] rule Name of quadrature rule (or use "default") /// @param[in] celltype /// @param[in] m Maximum degree of polynomial that this quadrature rule /// will integrate exactly /// @returns List of points and list of weights. 
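/// Illustrative usage sketch (not an additional API guarantee; the shapes in
/// the comment follow from the documented return values):
/// @code
/// auto [points, weights] = basix::quadrature::make_quadrature(
///     "default", basix::cell::type::triangle, 2);
/// // For a triangle, points has shape (num points, 2) and
/// // weights.size() == num points; the rule integrates degree-2
/// // polynomials exactly on the reference cell.
/// @endcode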
The number of points /// arrays has shape (num points, gdim) std::pair, std::vector> make_quadrature(const std::string& rule, cell::type celltype, int m); /// Compute GLL line quadrature rule on [0, 1] /// @param m order /// @returns list of 1D points, list of weights std::pair, std::vector> make_gll_line(int m); /// GLL quadrature rule (points and weights) std::pair, std::vector> compute_gll_rule(int m); } // namespace basix::quadrature basix-0.3.0/cpp/basix/raviart-thomas.cpp000066400000000000000000000100041411115224000201370ustar00rootroot00000000000000// Copyright (c) 2020 Chris Richardson & Matthew Scroggs // FEniCS Project // SPDX-License-Identifier: MIT #include "raviart-thomas.h" #include "element-families.h" #include "lagrange.h" #include "maps.h" #include "moments.h" #include "polyset.h" #include "quadrature.h" #include #include #include #include #include #include using namespace basix; //---------------------------------------------------------------------------- FiniteElement basix::create_rt(cell::type celltype, int degree) { if (celltype != cell::type::triangle and celltype != cell::type::tetrahedron) throw std::runtime_error("Unsupported cell type"); const std::size_t tdim = cell::topological_dimension(celltype); const cell::type facettype = (tdim == 2) ? cell::type::interval : cell::type::triangle; // The number of order (degree-1) scalar polynomials const std::size_t nv = polyset::dim(celltype, degree - 1); // The number of order (degree-2) scalar polynomials const std::size_t ns0 = polyset::dim(celltype, degree - 2); // The number of additional polynomials in the polynomial basis for // Raviart-Thomas const std::size_t ns = polyset::dim(facettype, degree - 1); // Evaluate the expansion polynomials at the quadrature points const auto [pts, _wts] = quadrature::make_quadrature("default", celltype, 2 * degree); auto wts = xt::adapt(_wts); const auto phi = xt::view(polyset::tabulate(celltype, degree, 0, pts), 0, xt::all(), xt::all()); // The number of order (degree) polynomials const std::size_t psize = phi.shape(1); // Create coefficients for order (degree-1) vector polynomials xt::xtensor B = xt::zeros({nv * tdim + ns, psize * tdim}); for (std::size_t j = 0; j < tdim; ++j) { xt::view(B, xt::range(nv * j, nv * j + nv), xt::range(psize * j, psize * j + nv)) = xt::eye(nv); } // Create coefficients for additional polynomials in Raviart-Thomas // polynomial basis for (std::size_t i = 0; i < ns; ++i) { auto p = xt::col(phi, ns0 + i); for (std::size_t k = 0; k < psize; ++k) { auto pk = xt::col(phi, k); for (std::size_t j = 0; j < tdim; ++j) { B(nv * tdim + i, k + psize * j) = xt::sum(wts * p * xt::col(pts, j) * pk)(); } } } // quadrature degree const int quad_deg = 5 * degree; std::array>, 4> M; std::array>, 4> x; // Add integral moments on facets const FiniteElement facet_moment_space = create_dlagrange(facettype, degree - 1); std::tie(x[tdim - 1], M[tdim - 1]) = moments::make_normal_integral_moments( facet_moment_space, celltype, tdim, quad_deg); xt::xtensor facet_transforms = moments::create_normal_moment_dof_transformations(facet_moment_space); // Add integral moments on interior if (degree > 1) { // Interior integral moment std::tie(x[tdim], M[tdim]) = moments::make_integral_moments( create_dlagrange(celltype, degree - 2), celltype, tdim, quad_deg); } const std::vector>> topology = cell::topology(celltype); std::map> entity_transformations; if (tdim == 2) { entity_transformations[cell::type::interval] = facet_transforms; } else if (tdim == 3) { 
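    // In 3D a Raviart-Thomas element has no degrees of freedom on the
    // edges, so the interval entry below is an empty placeholder block of
    // shape (1, 0, 0); the triangular facets reuse the normal-moment
    // transformations computed above. For example, for degree 1 on a
    // tetrahedron the element has one normal-moment dof per facet and no
    // interior dofs.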
entity_transformations[cell::type::interval] = xt::xtensor({1, 0, 0}); entity_transformations[cell::type::triangle] = facet_transforms; } xt::xtensor coeffs = compute_expansion_coefficients( celltype, B, {M[tdim - 1], M[tdim]}, {x[tdim - 1], x[tdim]}, degree); return FiniteElement(element::family::RT, celltype, degree, {tdim}, coeffs, entity_transformations, x, M, maps::type::contravariantPiola); } //----------------------------------------------------------------------------- basix-0.3.0/cpp/basix/raviart-thomas.h000066400000000000000000000005041411115224000176100ustar00rootroot00000000000000// Copyright (c) 2020 Chris Richardson // FEniCS Project // SPDX-License-Identifier: MIT #pragma once #include "cell.h" #include "finite-element.h" namespace basix { /// Create Raviart-Thomas element /// @param celltype /// @param degree FiniteElement create_rt(cell::type celltype, int degree); } // namespace basix basix-0.3.0/cpp/basix/regge.cpp000066400000000000000000000152521411115224000163010ustar00rootroot00000000000000// Copyright (c) 2020 Chris Richardson // FEniCS Project // SPDX-License-Identifier: MIT #include "regge.h" #include "dof-transformations.h" #include "element-families.h" #include "lattice.h" #include "maps.h" #include "polyset.h" #include #include #include using namespace basix; namespace { //----------------------------------------------------------------------------- xt::xtensor create_regge_space(cell::type celltype, int degree) { if (celltype != cell::type::triangle and celltype != cell::type::tetrahedron) throw std::runtime_error("Unsupported celltype"); const int tdim = cell::topological_dimension(celltype); const int nc = tdim * (tdim + 1) / 2; const int basis_size = polyset::dim(celltype, degree); const std::size_t ndofs = basis_size * nc; const std::size_t psize = basis_size * tdim * tdim; xt::xtensor wcoeffs = xt::zeros({ndofs, psize}); int s = basis_size; for (int i = 0; i < tdim; ++i) { for (int j = 0; j < tdim; ++j) { int xoff = i + tdim * j; int yoff = i + j; if (tdim == 3 and i > 0 and j > 0) ++yoff; xt::view(wcoeffs, xt::range(yoff * s, yoff * s + s), xt::range(xoff * s, xoff * s + s)) = xt::eye(s); } } return wcoeffs; } //----------------------------------------------------------------------------- std::pair>, 4>, std::array>, 4>> create_regge_interpolation(cell::type celltype, int degree) { const std::size_t tdim = cell::topological_dimension(celltype); const std::vector>> topology = cell::topology(celltype); const xt::xtensor geometry = cell::geometry(celltype); std::array>, 4> M; std::array>, 4> x; // Loop over edge and higher dimension entities for (std::size_t d = 1; d < topology.size(); ++d) { x[d].resize(topology[d].size()); M[d].resize(topology[d].size()); // Loop over entities of dimension dim for (std::size_t e = 0; e < topology[d].size(); ++e) { // Entity coordinates const xt::xtensor entity_x = cell::sub_entity_geometry(celltype, d, e); // Tabulate points in lattice cell::type ct = cell::sub_entity_type(celltype, d, e); const auto lattice = lattice::create(ct, degree + 2, lattice::type::equispaced, false); const auto x0 = xt::row(entity_x, 0); x[d][e] = xt::xtensor({lattice.shape(0), tdim}); // Copy points for (std::size_t p = 0; p < lattice.shape(0); ++p) { xt::row(x[d][e], p) = x0; for (std::size_t k = 0; k < entity_x.shape(0) - 1; ++k) { xt::row(x[d][e], p) += (xt::row(entity_x, k + 1) - x0) * lattice(p, k); } } // Store up outer(t, t) for all tangents const std::vector& vert_ids = topology[d][e]; const std::size_t ntangents = d * (d + 1) / 2; 
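      // A d-dimensional sub-entity has d + 1 vertices, and each pair of
      // vertices defines a tangent direction, giving d * (d + 1) / 2
      // tangents: 1 on an edge, 3 on a triangular face, 6 in the interior
      // of a tetrahedron. The loops below form the outer product t t^T for
      // each such tangent; the degrees of freedom built afterwards evaluate
      // the matrix-valued basis against these outer products at the
      // lattice points.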
xt::xtensor vvt( {ntangents, geometry.shape(1), geometry.shape(1)}); std::vector _edge(geometry.shape(1)); auto edge_t = xt::adapt(_edge); int c = 0; for (std::size_t s = 0; s < d; ++s) { for (std::size_t r = s + 1; r < d + 1; ++r) { for (std::size_t p = 0; p < geometry.shape(1); ++p) edge_t[p] = geometry(vert_ids[r], p) - geometry(vert_ids[s], p); // outer product v.v^T auto result = xt::linalg::outer(edge_t, edge_t); xt::view(vvt, c, xt::all(), xt::all()).assign(result); ++c; } } M[d][e] = xt::zeros( {lattice.shape(0) * ntangents, tdim * tdim, lattice.shape(0)}); for (std::size_t p = 0; p < lattice.shape(0); ++p) { for (std::size_t j = 0; j < ntangents; ++j) { auto vvt_flat = xt::ravel(xt::view(vvt, j, xt::all(), xt::all())); for (std::size_t i = 0; i < tdim * tdim; ++i) M[d][e](p * ntangents + j, i, p) = vvt_flat(i); } } } } return {x, M}; } //----------------------------------------------------------------------------- } // namespace //----------------------------------------------------------------------------- FiniteElement basix::create_regge(cell::type celltype, int degree) { const std::size_t tdim = cell::topological_dimension(celltype); const xt::xtensor wcoeffs = create_regge_space(celltype, degree); const auto [x, M] = create_regge_interpolation(celltype, degree); const xt::xtensor coeffs = compute_expansion_coefficients( celltype, wcoeffs, {M[1], M[2], M[3]}, {x[1], x[2], x[3]}, degree); // Regge has (d+1) dofs on each edge, 3d(d+1)/2 on each face // and d(d-1)(d+1) on the interior in 3D const std::vector>> topology = cell::topology(celltype); std::map> entity_transformations; const std::vector edge_ref = doftransforms::interval_reflection(degree + 1); const std::array e_shape = {1, edge_ref.size(), edge_ref.size()}; xt::xtensor et = xt::zeros(e_shape); for (std::size_t i = 0; i < edge_ref.size(); ++i) et(0, i, edge_ref[i]) = 1; entity_transformations[cell::type::interval] = et; if (tdim > 2) { const std::vector face_rot_perm = doftransforms::triangle_rotation(degree); const std::vector face_ref_perm = doftransforms::triangle_reflection(degree); const xt::xtensor sub_rot = {{0, 1, 0}, {0, 0, 1}, {1, 0, 0}}; const xt::xtensor sub_ref = {{0, 1, 0}, {1, 0, 0}, {0, 0, 1}}; const std::array f_shape = {2, face_ref_perm.size() * 3, face_ref_perm.size() * 3}; xt::xtensor face_trans = xt::zeros(f_shape); for (std::size_t i = 0; i < face_ref_perm.size(); ++i) { xt::view(face_trans, 0, xt::range(3 * i, 3 * i + 3), xt::range(3 * face_rot_perm[i], 3 * face_rot_perm[i] + 3)) = sub_rot; xt::view(face_trans, 1, xt::range(3 * i, 3 * i + 3), xt::range(3 * face_ref_perm[i], 3 * face_ref_perm[i] + 3)) = sub_ref; } entity_transformations[cell::type::triangle] = face_trans; } return FiniteElement(element::family::Regge, celltype, degree, {tdim, tdim}, coeffs, entity_transformations, x, M, maps::type::doubleCovariantPiola); } //----------------------------------------------------------------------------- basix-0.3.0/cpp/basix/regge.h000066400000000000000000000004541411115224000157440ustar00rootroot00000000000000// Copyright (c) 2020 Chris Richardson // FEniCS Project // SPDX-License-Identifier: MIT #pragma once #include "finite-element.h" namespace basix { /// Create Regge element /// @param celltype /// @param degree FiniteElement create_regge(cell::type celltype, int degree); } // namespace basix basix-0.3.0/cpp/basix/serendipity.cpp000066400000000000000000000612711411115224000175510ustar00rootroot00000000000000// Copyright (c) 2021 Chris Richardson & Matthew Scroggs // FEniCS Project // 
SPDX-License-Identifier: MIT #include "serendipity.h" #include "element-families.h" #include "lagrange.h" #include "lattice.h" #include "log.h" #include "maps.h" #include "moments.h" #include "polyset.h" #include "quadrature.h" #include #include #include #include #include #include using namespace basix; namespace { //---------------------------------------------------------------------------- xt::xtensor make_serendipity_space_2d(int degree) { const std::size_t ndofs = degree == 1 ? 4 : degree * (degree + 3) / 2 + 3; // Evaluate the expansion polynomials at the quadrature points auto [pts, _wts] = quadrature::make_quadrature( "default", cell::type::quadrilateral, 2 * degree); auto wts = xt::adapt(_wts); xt::xtensor Pq = xt::view(polyset::tabulate(cell::type::quadrilateral, degree, 0, pts), 0, xt::all(), xt::all()); xt::xtensor Pt = xt::view(polyset::tabulate(cell::type::triangle, degree, 0, pts), 0, xt::all(), xt::all()); const std::size_t psize = Pq.shape(1); const std::size_t nv = Pt.shape(1); // Create coefficients for order (degree) polynomials xt::xtensor wcoeffs = xt::zeros({ndofs, psize}); for (std::size_t i = 0; i < nv; ++i) { auto p_i = xt::col(Pt, i); for (std::size_t k = 0; k < psize; ++k) wcoeffs(i, k) = xt::sum(wts * p_i * xt::col(Pq, k))(); } auto q0 = xt::col(pts, 0); auto q1 = xt::col(pts, 1); if (degree == 1) { for (std::size_t k = 0; k < psize; ++k) wcoeffs(nv, k) = xt::sum(wts * q0 * q1 * xt::col(Pq, k))(); return wcoeffs; } xt::xtensor integrand; for (std::size_t k = 0; k < psize; ++k) { auto pk = xt::col(Pq, k); for (std::size_t a = 0; a < 2; ++a) { auto q_a = xt::col(pts, a); integrand = wts * q0 * q1 * pk; for (int i = 1; i < degree; ++i) integrand *= q_a; wcoeffs(nv + a, k) = xt::sum(integrand)(); } } return wcoeffs; } //---------------------------------------------------------------------------- std::vector> serendipity_3d_indices(int total, int linear, std::vector done = {}) { if (done.size() == 3) { int count = 0; for (int i = 0; i < 3; ++i) if (done[i] == 1) ++count; if (count >= linear) return {{done[0], done[1], done[2]}}; return {}; } else if (done.size() == 2) { return serendipity_3d_indices( total, linear, {done[0], done[1], total - done[0] - done[1]}); } std::vector new_done(done.size() + 1); int sum_done = 0; for (std::size_t i = 0; i < done.size(); ++i) { new_done[i] = done[i]; sum_done += done[i]; } std::vector> out; for (int i = 0; i <= total - sum_done; ++i) { new_done[done.size()] = i; for (std::array j : serendipity_3d_indices(total, linear, new_done)) out.push_back(j); } return out; } //---------------------------------------------------------------------------- xt::xtensor make_serendipity_space_3d(int degree) { const std::size_t ndofs = degree < 4 ? 12 * degree - 4 : (degree < 6 ? 
3 * degree * degree - 3 * degree + 14 : degree * (degree - 1) * (degree + 1) / 6 + degree * degree + 5 * degree + 4); // Number of order (degree) polynomials // Evaluate the expansion polynomials at the quadrature points auto [pts, _wts] = quadrature::make_quadrature( "default", cell::type::hexahedron, 2 * degree); auto wts = xt::adapt(_wts); xt::xtensor Ph = xt::view(polyset::tabulate(cell::type::hexahedron, degree, 0, pts), 0, xt::all(), xt::all()); xt::xtensor Pt = xt::view(polyset::tabulate(cell::type::tetrahedron, degree, 0, pts), 0, xt::all(), xt::all()); const std::size_t psize = Ph.shape(1); const std::size_t nv = Pt.shape(1); // Create coefficients for order (degree) polynomials xt::xtensor wcoeffs = xt::zeros({ndofs, psize}); for (std::size_t i = 0; i < nv; ++i) { auto p_i = xt::col(Pt, i); for (std::size_t k = 0; k < psize; ++k) wcoeffs(i, k) = xt::sum(wts * p_i * xt::col(Ph, k))(); } std::size_t c = nv; xt::xtensor integrand; std::vector> indices; for (std::size_t s = 1; s <= 3; ++s) { indices = serendipity_3d_indices(s + degree, s); for (std::array i : indices) { for (std::size_t k = 0; k < psize; ++k) { integrand = wts * xt::col(Ph, k); for (int d = 0; d < 3; ++d) { auto q_d = xt::col(pts, d); for (int j = 0; j < i[d]; ++j) integrand *= q_d; } wcoeffs(c, k) = xt::sum(integrand)(); } ++c; } } return wcoeffs; } //---------------------------------------------------------------------------- xt::xtensor make_serendipity_div_space_2d(int degree) { const std::size_t ndofs = degree * (degree + 3) + 4; // Evaluate the expansion polynomials at the quadrature points auto [pts, _wts] = quadrature::make_quadrature( "default", cell::type::quadrilateral, 2 * degree + 2); auto wts = xt::adapt(_wts); xt::xtensor Pq = xt::view( polyset::tabulate(cell::type::quadrilateral, degree + 1, 0, pts), 0, xt::all(), xt::all()); xt::xtensor Pt = xt::view(polyset::tabulate(cell::type::triangle, degree, 0, pts), 0, xt::all(), xt::all()); const std::size_t psize = Pq.shape(1); const std::size_t nv = Pt.shape(1); // Create coefficients for order (degree) vector polynomials xt::xtensor wcoeffs = xt::zeros({ndofs, psize * 2}); for (std::size_t i = 0; i < nv; ++i) { for (int d = 0; d < 2; ++d) { auto p_i = xt::col(Pt, i); for (std::size_t k = 0; k < psize; ++k) { wcoeffs(d * nv + i, d * psize + k) = xt::sum(wts * p_i * xt::col(Pq, k))(); } } } auto q0 = xt::col(pts, 0); auto q1 = xt::col(pts, 1); xt::xtensor integrand; for (std::size_t k = 0; k < psize; ++k) { auto pk = xt::col(Pq, k); for (std::size_t d = 0; d < 2; ++d) { for (std::size_t a = 0; a < 2; ++a) { auto q_a = xt::col(pts, a); integrand = wts * pk; if (a == 0 and d == 0) integrand *= q0; else if (a == 0 and d == 1) integrand *= (degree + 1) * q1; else if (a == 1 and d == 0) integrand *= (degree + 1) * q0; else if (a == 1 and d == 1) integrand *= q1; for (int i = 0; i < degree; ++i) integrand *= q_a; wcoeffs(2 * nv + a, psize * d + k) = xt::sum(integrand)(); } } } return wcoeffs; } //---------------------------------------------------------------------------- xt::xtensor make_serendipity_div_space_3d(int degree) { const std::size_t ndofs = (degree + 1) * (degree * (degree + 5) + 12) / 2; // Evaluate the expansion polynomials at the quadrature points auto [pts, _wts] = quadrature::make_quadrature( "default", cell::type::hexahedron, 2 * degree + 2); auto wts = xt::adapt(_wts); xt::xtensor polyset_at_Qpts = xt::view(polyset::tabulate(cell::type::hexahedron, degree + 1, 0, pts), 0, xt::all(), xt::all()); xt::xtensor smaller_polyset_at_Qpts = 
xt::view(polyset::tabulate(cell::type::tetrahedron, degree, 0, pts), 0, xt::all(), xt::all()); const std::size_t psize = polyset_at_Qpts.shape(1); const std::size_t nv = smaller_polyset_at_Qpts.shape(1); // Create coefficients for order (degree) vector polynomials xt::xtensor wcoeffs = xt::zeros({ndofs, psize * 3}); for (std::size_t i = 0; i < nv; ++i) { for (int d = 0; d < 3; ++d) { auto p_i = xt::col(smaller_polyset_at_Qpts, i); for (std::size_t k = 0; k < psize; ++k) { wcoeffs(d * nv + i, d * psize + k) = xt::sum(wts * p_i * xt::col(polyset_at_Qpts, k))(); } } } auto q0 = xt::col(pts, 0); auto q1 = xt::col(pts, 1); auto q2 = xt::col(pts, 2); xt::xtensor integrand; for (std::size_t k = 0; k < psize; ++k) { auto pk = xt::col(polyset_at_Qpts, k); for (std::size_t d = 0; d < 3; ++d) { for (std::size_t a = 0; a < 3; ++a) { for (int index = 0; index <= degree; ++index) { auto q_a = xt::col(pts, a); integrand = wts * pk; if (a == 0) { if (d == 0) integrand *= -(degree + 2) * q0; else if (d == 1) integrand *= q1; else if (d == 2) integrand *= q2; for (int i = 0; i < index; ++i) integrand *= q1; for (int i = 0; i < degree - index; ++i) integrand *= q2; } else if (a == 1) { if (d == 0) integrand *= -q0; else if (d == 1) integrand *= (degree + 2) * q1; else if (d == 2) integrand *= -q2; for (int i = 0; i < index; ++i) integrand *= q0; for (int i = 0; i < degree - index; ++i) integrand *= q2; } else if (a == 2) { if (d == 0) integrand *= q0; else if (d == 1) integrand *= q1; else if (d == 2) integrand *= -(degree + 2) * q2; for (int i = 0; i < index; ++i) integrand *= q0; for (int i = 0; i < degree - index; ++i) integrand *= q1; } wcoeffs(3 * nv + 3 * index + a, psize * d + k) = xt::sum(integrand)(); } } } } return wcoeffs; } //---------------------------------------------------------------------------- xt::xtensor make_serendipity_curl_space_2d(int degree) { const std::size_t ndofs = degree * (degree + 3) + 4; // Evaluate the expansion polynomials at the quadrature points auto [pts, _wts] = quadrature::make_quadrature( "default", cell::type::quadrilateral, 2 * degree + 2); auto wts = xt::adapt(_wts); xt::xtensor polyset_at_Qpts = xt::view( polyset::tabulate(cell::type::quadrilateral, degree + 1, 0, pts), 0, xt::all(), xt::all()); xt::xtensor smaller_polyset_at_Qpts = xt::view(polyset::tabulate(cell::type::triangle, degree, 0, pts), 0, xt::all(), xt::all()); const std::size_t psize = polyset_at_Qpts.shape(1); const std::size_t nv = smaller_polyset_at_Qpts.shape(1); // Create coefficients for order (degree) vector polynomials xt::xtensor wcoeffs = xt::zeros({ndofs, psize * 2}); for (std::size_t i = 0; i < nv; ++i) { for (int d = 0; d < 2; ++d) { auto p_i = xt::col(smaller_polyset_at_Qpts, i); for (std::size_t k = 0; k < psize; ++k) { wcoeffs(d * nv + i, d * psize + k) = xt::sum(wts * p_i * xt::col(polyset_at_Qpts, k))(); } } } auto q0 = xt::col(pts, 0); auto q1 = xt::col(pts, 1); xt::xtensor integrand; for (std::size_t k = 0; k < psize; ++k) { auto pk = xt::col(polyset_at_Qpts, k); for (std::size_t d = 0; d < 2; ++d) { for (std::size_t a = 0; a < 2; ++a) { auto q_a = xt::col(pts, a); integrand = wts * pk; if (a == 0 and d == 0) integrand *= (degree + 1) * q1; else if (a == 0 and d == 1) integrand *= -q0; else if (a == 1 and d == 0) integrand *= q1; else if (a == 1 and d == 1) integrand *= -(degree + 1) * q0; for (int i = 0; i < degree; ++i) integrand *= q_a; wcoeffs(2 * nv + a, psize * d + k) = xt::sum(integrand)(); } } } return wcoeffs; } 
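// Dimension check for the 2D serendipity H(div) and H(curl) spaces built
// above: each contains two copies of the scalar triangle polynomials of
// degree `degree`, nv = (degree + 1) * (degree + 2) / 2 components each,
// plus the two extra vector-valued polynomials added in the final loop, so
//   ndofs = 2 * nv + 2 = (degree + 1) * (degree + 2) + 2
//         = degree * (degree + 3) + 4,
// which matches the ndofs expression used in both functions (8 for degree
// 1, 14 for degree 2).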
//---------------------------------------------------------------------------- xt::xtensor make_serendipity_curl_space_3d(int degree) { const std::size_t ndofs = degree <= 3 ? 6 * (degree * (degree + 1) + 2) : degree * (degree + 1) * (degree - 1) / 2 + 3 * (degree * (degree + 4) + 3); // Evaluate the expansion polynomials at the quadrature points auto [pts, _wts] = quadrature::make_quadrature( "default", cell::type::hexahedron, 2 * degree + 2); auto wts = xt::adapt(_wts); xt::xtensor polyset_at_Qpts = xt::view(polyset::tabulate(cell::type::hexahedron, degree + 1, 0, pts), 0, xt::all(), xt::all()); xt::xtensor smaller_polyset_at_Qpts = xt::view(polyset::tabulate(cell::type::tetrahedron, degree, 0, pts), 0, xt::all(), xt::all()); const std::size_t psize = polyset_at_Qpts.shape(1); const std::size_t nv = smaller_polyset_at_Qpts.shape(1); // Create coefficients for order (degree) vector polynomials xt::xtensor wcoeffs = xt::zeros({ndofs, psize * 3}); for (std::size_t i = 0; i < nv; ++i) { for (int d = 0; d < 3; ++d) { auto p_i = xt::col(smaller_polyset_at_Qpts, i); for (std::size_t k = 0; k < psize; ++k) { wcoeffs(d * nv + i, d * psize + k) = xt::sum(wts * p_i * xt::col(polyset_at_Qpts, k))(); } } } auto q0 = xt::col(pts, 0); auto q1 = xt::col(pts, 1); auto q2 = xt::col(pts, 2); xt::xtensor integrand; for (std::size_t k = 0; k < psize; ++k) { auto pk = xt::col(polyset_at_Qpts, k); for (std::size_t d = 0; d < 3; ++d) { for (std::size_t a = 0; a < (degree > 1 ? 3 : 2); ++a) { for (int index = 0; index <= degree; ++index) { auto q_a = xt::col(pts, a); integrand = wts * pk; if (a == 0) { if (d == 0) integrand *= q1 * q2; else if (d == 1) integrand *= 0; else if (d == 2) integrand *= -q0 * q1; for (int i = 0; i < index; ++i) integrand *= q0; for (int i = 0; i < degree - 1 - index; ++i) integrand *= q2; } else if (a == 1) { if (d == 0) integrand *= 0; else if (d == 1) integrand *= q0 * q2; else if (d == 2) integrand *= -q0 * q1; for (int i = 0; i < index; ++i) integrand *= q1; for (int i = 0; i < degree - 1 - index; ++i) integrand *= q2; } else if (a == 2) { if (d == 0) integrand *= q1 * q2; else if (d == 1) integrand *= -q0 * q2; else if (d == 2) integrand *= 0; for (int i = 0; i < index; ++i) integrand *= q0; for (int i = 0; i < degree - 1 - index; ++i) integrand *= q1; } wcoeffs(3 * nv + 3 * index + a, psize * d + k) = xt::sum(integrand)(); } } } } int c = 3 * nv + (degree > 1 ? 
3 : 2) * degree; std::vector> indices; for (std::size_t s = 1; s <= 3; ++s) { indices = serendipity_3d_indices(s + degree + 1, s); for (std::array i : indices) { for (std::size_t k = 0; k < psize; ++k) { for (int d = 0; d < 3; ++d) { integrand = wts * xt::col(polyset_at_Qpts, k); for (int d2 = 0; d2 < 3; ++d2) { auto q_d2 = xt::col(pts, d2); if (d == d2) { integrand *= i[d2]; for (int j = 0; j < i[d2] - 1; ++j) integrand *= q_d2; } else { for (int j = 0; j < i[d2]; ++j) integrand *= q_d2; } } wcoeffs(c, psize * d + k) = xt::sum(integrand)(); } } ++c; } } return wcoeffs; } //---------------------------------------------------------------------------- } // namespace //---------------------------------------------------------------------------- FiniteElement basix::create_serendipity(cell::type celltype, int degree) { if (celltype != cell::type::interval and celltype != cell::type::quadrilateral and celltype != cell::type::hexahedron) { throw std::runtime_error("Invalid celltype"); } const std::vector>> topology = cell::topology(celltype); const std::size_t tdim = cell::topological_dimension(celltype); // Number of dofs and interpolation points int quad_deg = 5 * degree; std::array>, 4> M; std::array>, 4> x; // dim 0 (vertices) const xt::xtensor geometry = cell::geometry(celltype); const std::size_t num_vertices = geometry.shape(0); M[0] = std::vector>(num_vertices, xt::ones({1, 1, 1})); x[0].resize(geometry.shape(0)); for (std::size_t i = 0; i < x[0].size(); ++i) { x[0][i] = xt::reshape_view( xt::row(geometry, i), {static_cast(1), geometry.shape(1)}); } xt::xtensor edge_transforms, face_transforms; if (degree >= 2) { FiniteElement moment_space = create_dpc(cell::type::interval, degree - 2); std::tie(x[1], M[1]) = moments::make_integral_moments(moment_space, celltype, 1, quad_deg); if (tdim > 1) { edge_transforms = moments::create_dot_moment_dof_transformations(moment_space); } } if (tdim >= 2 and degree >= 4) { FiniteElement moment_space = create_dpc(cell::type::quadrilateral, degree - 4); std::tie(x[2], M[2]) = moments::make_integral_moments(moment_space, celltype, 1, quad_deg); if (tdim > 2) { face_transforms = moments::create_dot_moment_dof_transformations(moment_space); } } if (tdim == 3 and degree >= 6) { std::tie(x[3], M[3]) = moments::make_integral_moments( create_dpc(cell::type::hexahedron, degree - 6), celltype, 1, quad_deg); } xt::xtensor wcoeffs; if (tdim == 1) wcoeffs = xt::eye(degree + 1); else if (tdim == 2) wcoeffs = make_serendipity_space_2d(degree); else if (tdim == 3) wcoeffs = make_serendipity_space_3d(degree); std::map> entity_transformations; if (tdim >= 2) { if (degree < 2) entity_transformations[cell::type::interval] = xt::xtensor({1, 0, 0}); else entity_transformations[cell::type::interval] = edge_transforms; if (tdim == 3) { if (degree < 4) { entity_transformations[cell::type::quadrilateral] = xt::xtensor({2, 0, 0}); } else { entity_transformations[cell::type::quadrilateral] = face_transforms; } } } xt::xtensor coeffs = compute_expansion_coefficients( celltype, wcoeffs, {M[0], M[1], M[2], M[3]}, {x[0], x[1], x[2], x[3]}, degree); return FiniteElement(element::family::Serendipity, celltype, degree, {1}, coeffs, entity_transformations, x, M, maps::type::identity); } //----------------------------------------------------------------------------- FiniteElement basix::create_serendipity_div(cell::type celltype, int degree) { if (celltype != cell::type::interval and celltype != cell::type::quadrilateral and celltype != cell::type::hexahedron) { throw 
std::runtime_error("Invalid celltype"); } const std::vector>> topology = cell::topology(celltype); const std::size_t tdim = cell::topological_dimension(celltype); const cell::type facettype = (tdim == 2) ? cell::type::interval : cell::type::quadrilateral; // Number of dofs and interpolation points int quad_deg = 5 * degree; std::array>, 4> M; std::array>, 4> x; xt::xtensor facet_transforms; FiniteElement facet_moment_space = create_dpc(facettype, degree); std::tie(x[tdim - 1], M[tdim - 1]) = moments::make_normal_integral_moments( facet_moment_space, celltype, tdim, quad_deg); if (tdim > 1) { facet_transforms = moments::create_normal_moment_dof_transformations(facet_moment_space); } if (tdim >= 2 and degree >= 2) { FiniteElement cell_moment_space = create_dpc(celltype, degree - 2); std::tie(x[tdim], M[tdim]) = moments::make_integral_moments( cell_moment_space, celltype, tdim, quad_deg); } xt::xtensor wcoeffs; if (tdim == 1) wcoeffs = xt::eye(degree + 1); else if (tdim == 2) wcoeffs = make_serendipity_div_space_2d(degree); else if (tdim == 3) wcoeffs = make_serendipity_div_space_3d(degree); std::map> entity_transformations; if (tdim == 2) { entity_transformations[cell::type::interval] = facet_transforms; } else if (tdim == 3) { entity_transformations[cell::type::interval] = xt::xtensor({1, 0, 0}); entity_transformations[cell::type::quadrilateral] = facet_transforms; } xt::xtensor coeffs = compute_expansion_coefficients( celltype, wcoeffs, {M[tdim - 1], M[tdim]}, {x[tdim - 1], x[tdim]}, degree + 1); return FiniteElement(element::family::BDM, celltype, degree + 1, {tdim}, coeffs, entity_transformations, x, M, maps::type::contravariantPiola); } //----------------------------------------------------------------------------- FiniteElement basix::create_serendipity_curl(cell::type celltype, int degree) { if (celltype != cell::type::interval and celltype != cell::type::quadrilateral and celltype != cell::type::hexahedron) { throw std::runtime_error("Invalid celltype"); } const std::size_t tdim = cell::topological_dimension(celltype); // Evaluate the expansion polynomials at the quadrature points auto [Qpts, _wts] = quadrature::make_quadrature("default", celltype, 2 * degree); auto wts = xt::adapt(_wts); xt::xtensor polyset_at_Qpts = xt::view( polyset::tabulate(celltype, degree, 0, Qpts), 0, xt::all(), xt::all()); xt::xtensor wcoeffs; if (tdim == 1) wcoeffs = xt::eye(degree + 1); else if (tdim == 2) wcoeffs = make_serendipity_curl_space_2d(degree); else if (tdim == 3) wcoeffs = make_serendipity_curl_space_3d(degree); std::array>, 4> M; std::array>, 4> x; FiniteElement edge_moment_space = create_dpc(cell::type::interval, degree); std::tie(x[1], M[1]) = moments::make_tangent_integral_moments( edge_moment_space, celltype, tdim, 2 * degree + 2); xt::xtensor edge_transforms = moments::create_tangent_moment_dof_transformations(edge_moment_space); // Add integral moments on interior xt::xtensor face_transforms; if (degree >= 2) { // Face integral moment FiniteElement moment_space = create_dpc(cell::type::quadrilateral, degree - 2); std::tie(x[2], M[2]) = moments::make_integral_moments( moment_space, celltype, tdim, 2 * degree); if (tdim == 3) { face_transforms = moments::create_moment_dof_transformations(moment_space); if (degree >= 4) { // Interior integral moment std::tie(x[3], M[3]) = moments::make_integral_moments( create_dpc(cell::type::hexahedron, degree - 4), celltype, tdim, 2 * degree - 3); } } } const std::vector>> topology = cell::topology(celltype); std::map> entity_transformations; 
entity_transformations[cell::type::interval] = edge_transforms; if (tdim == 3) { if (degree <= 1) { entity_transformations[cell::type::quadrilateral] = xt::xtensor({2, 0, 0}); } else { entity_transformations[cell::type::quadrilateral] = face_transforms; } } xt::xtensor coeffs = compute_expansion_coefficients( celltype, wcoeffs, {M[1], M[2], M[3]}, {x[1], x[2], x[3]}, degree + 1); return FiniteElement(element::family::N2E, celltype, degree + 1, {tdim}, coeffs, entity_transformations, x, M, maps::type::covariantPiola); } //----------------------------------------------------------------------------- basix-0.3.0/cpp/basix/serendipity.h000066400000000000000000000016411411115224000172110ustar00rootroot00000000000000// Copyright (c) 2020 Matthew Scroggs // FEniCS Project // SPDX-License-Identifier: MIT #pragma once #include "cell.h" #include "finite-element.h" namespace basix { /// Create a serendipity element on cell with given degree /// @param[in] celltype quadrilateral or hexahedral celltype /// @param[in] degree /// @return A FiniteElement FiniteElement create_serendipity(cell::type celltype, int degree); /// Create a serendipity H(div) element on cell with given degree /// @param[in] celltype quadrilateral or hexahedral celltype /// @param[in] degree /// @return A FiniteElement FiniteElement create_serendipity_div(cell::type celltype, int degree); /// Create a serendipity H(curl) element on cell with given degree /// @param[in] celltype quadrilateral or hexahedral celltype /// @param[in] degree /// @return A FiniteElement FiniteElement create_serendipity_curl(cell::type celltype, int degree); } // namespace basix basix-0.3.0/cpp/basix/version.h.in000066400000000000000000000004571411115224000167500ustar00rootroot00000000000000// Copyright (c) 2020 Chris Richardson // FEniCS Project // SPDX-License-Identifier: MIT #pragma once #define BASIX_VERSION @PROJECT_VERSION@ #define BASIX_VERSION_MAJOR @PROJECT_VERSION_MAJOR@ #define BASIX_VERSION_MINOR @PROJECT_VERSION_MINOR@ #define BASIX_VERSION_PATCH @PROJECT_VERSION_PATCH@ basix-0.3.0/doc/000077500000000000000000000000001411115224000133545ustar00rootroot00000000000000basix-0.3.0/doc/assets/000077500000000000000000000000001411115224000146565ustar00rootroot00000000000000basix-0.3.0/doc/assets/sty.css000066400000000000000000000012461411115224000162120ustar00rootroot00000000000000.fenicsmain, .fenicshead {padding:20px;} .fenicsheadwrapper {border-bottom:2px solid black;min-height:100px; background-image: url(https://fenicsproject.org/pub/graphics/fenics_logo.svg); background-size:70px; background-repeat:no-repeat;padding:0px 70px; background-position-y: center;} .fenicstitle, .fenicsnav {line-height:normal;font-weight:normal;} .fenicshead .fenicstitle {font-size:60px;text-align:center} .fenicshead .fenicstitle .fenicsversion {font-size:30px} .fenicstitle, .fenicsnav, .fenicshead {font-family:Georgia, Palatino, 'Palatino Linotype', Times, 'Times New Roman', serif} .fenicsnav {display:inline-block;font-size:20px;padding:0px 15px} basix-0.3.0/doc/cpp/000077500000000000000000000000001411115224000141365ustar00rootroot00000000000000basix-0.3.0/doc/cpp/Doxyfile000066400000000000000000003215271411115224000156560ustar00rootroot00000000000000# Doxyfile 1.8.13 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. # # All text after a double hash (##) is considered a comment and is placed in # front of the TAG it is preceding. 
# # All text after a single hash (#) is considered a comment and will be ignored. # The format is: # TAG = value [value, ...] # For lists, items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (\" \"). #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all text # before the first occurrence of this tag. Doxygen uses libiconv (or the iconv # built into libc) for the transcoding. See http://www.gnu.org/software/libiconv # for the list of possible encodings. # The default value is: UTF-8. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or a sequence of words surrounded by # double-quotes, unless you are using Doxywizard) that should identify the # project for which the documentation is generated. This name is used in the # title of most generated pages and in a few other places. # The default value is: My Project. PROJECT_NAME = Basix # The PROJECT_NUMBER tag can be used to enter a project or revision number. This # could be handy for archiving the generated documentation or if some version # control system is used. PROJECT_NUMBER = $(BASIX_VERSION) # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a # quick idea about the purpose of the project. Keep the description short. PROJECT_BRIEF = # With the PROJECT_LOGO tag one can specify a logo or an icon that is included # in the documentation. The maximum height of the logo should not exceed 55 # pixels and the maximum width should not exceed 200 pixels. Doxygen will copy # the logo to the output directory. PROJECT_LOGO = # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path # into which the generated documentation will be written. If a relative path is # entered, it will be relative to the location where doxygen was started. If # left blank the current directory will be used. OUTPUT_DIRECTORY = # If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- # directories (in 2 levels) under the output directory of each output format and # will distribute the generated files over these directories. Enabling this # option can be useful when feeding doxygen a huge amount of source files, where # putting all generated files in the same directory would otherwise causes # performance problems for the file system. # The default value is: NO. CREATE_SUBDIRS = NO # If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII # characters to appear in the names of generated files. If set to NO, non-ASCII # characters will be escaped, for example _xE3_x81_x84 will be used for Unicode # U+3044. # The default value is: NO. ALLOW_UNICODE_NAMES = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. 
# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, # Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), # Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, # Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), # Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, # Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, # Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, # Ukrainian and Vietnamese. # The default value is: English. OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member # descriptions after the members that are listed in the file and class # documentation (similar to Javadoc). Set to NO to disable this. # The default value is: YES. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief # description of a member or function before the detailed description # # Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. # The default value is: YES. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator that is # used to form the text in various listings. Each string in this list, if found # as the leading text of the brief description, will be stripped from the text # and the result, after processing the whole list, is used as the annotated # text. Otherwise, the brief description is used as-is. If left blank, the # following values are used ($name is automatically replaced with the name of # the entity):The $name class, The $name widget, The $name file, is, provides, # specifies, contains, represents, a, an and the. ABBREVIATE_BRIEF = "The $name class" \ "The $name widget" \ "The $name file" \ is \ provides \ specifies \ contains \ represents \ a \ an \ the # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # doxygen will generate a detailed section even if there is only a brief # description. # The default value is: NO. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. # The default value is: NO. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path # before files name in the file list and in the header files. If set to NO the # shortest path that makes the file name unique will be used # The default value is: YES. FULL_PATH_NAMES = YES # The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. # Stripping is only done if one of the specified strings matches the left-hand # part of the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the path to # strip. # # Note that you can specify absolute paths here, but also relative paths, which # will be relative from the directory where doxygen is started. # This tag requires that the tag FULL_PATH_NAMES is set to YES. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the # path mentioned in the documentation of a class, which tells the reader which # header file to include in order to use a class. 
If left blank only the name of # the header file containing the class definition is used. Otherwise one should # specify the list of include paths that are normally passed to the compiler # using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but # less readable) file names. This can be useful is your file systems doesn't # support long names like on DOS, Mac, or CD-ROM. # The default value is: NO. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the # first line (until the first dot) of a Javadoc-style comment as the brief # description. If set to NO, the Javadoc-style will behave just like regular Qt- # style comments (thus requiring an explicit @brief command for a brief # description.) # The default value is: NO. JAVADOC_AUTOBRIEF = NO # If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first # line (until the first dot) of a Qt-style comment as the brief description. If # set to NO, the Qt-style will behave just like regular Qt-style comments (thus # requiring an explicit \brief command for a brief description.) # The default value is: NO. QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a # multi-line C++ special comment block (i.e. a block of //! or /// comments) as # a brief description. This used to be the default behavior. The new default is # to treat a multi-line C++ comment block as a detailed description. Set this # tag to YES if you prefer the old behavior instead. # # Note that setting this tag to YES also means that rational rose comments are # not recognized any more. # The default value is: NO. MULTILINE_CPP_IS_BRIEF = NO # If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the # documentation from any documented member that it re-implements. # The default value is: YES. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new # page for each member. If set to NO, the documentation of a member will be part # of the file/class/namespace that contains it. # The default value is: NO. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen # uses this value to replace tabs by spaces in code fragments. # Minimum value: 1, maximum value: 16, default value: 4. TAB_SIZE = 4 # This tag can be used to specify a number of aliases that act as commands in # the documentation. An alias has the form: # name=value # For example adding # "sideeffect=@par Side Effects:\n" # will allow you to put the command \sideeffect (or @sideeffect) in the # documentation, which will result in a user-defined paragraph with heading # "Side Effects:". You can put \n's in the value part of an alias to insert # newlines. ALIASES = # This tag can be used to specify a number of word-keyword mappings (TCL only). # A mapping has the form "name=value". For example adding "class=itcl::class" # will allow you to use the command class in the itcl::class meaning. TCL_SUBST = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources # only. Doxygen will then generate output that is more tailored for C. For # instance, some of the names that are used will be different. The list of all # members will be omitted, etc. # The default value is: NO. OPTIMIZE_OUTPUT_FOR_C = NO # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or # Python sources only. 
Doxygen will then generate output that is more tailored # for that language. For instance, namespaces will be presented as packages, # qualified scopes will look different, etc. # The default value is: NO. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources. Doxygen will then generate output that is tailored for Fortran. # The default value is: NO. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for VHDL. # The default value is: NO. OPTIMIZE_OUTPUT_VHDL = NO # Doxygen selects the parser to use depending on the extension of the files it # parses. With this tag you can assign which parser to use for a given # extension. Doxygen has a built-in mapping, but you can override or extend it # using this tag. The format is ext=language, where ext is a file extension, and # language is one of the parsers supported by doxygen: IDL, Java, Javascript, # C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran: # FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran: # Fortran. In the later case the parser tries to guess whether the code is fixed # or free formatted code, this is the default for Fortran type files), VHDL. For # instance to make doxygen treat .inc files as Fortran files (default is PHP), # and .f files as C (default is Fortran), use: inc=Fortran f=C. # # Note: For files without extension you can use no_extension as a placeholder. # # Note that for custom extensions you also need to set FILE_PATTERNS otherwise # the files are not read by doxygen. EXTENSION_MAPPING = # If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments # according to the Markdown format, which allows for more readable # documentation. See http://daringfireball.net/projects/markdown/ for details. # The output of markdown processing is further processed by doxygen, so you can # mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in # case of backward compatibilities issues. # The default value is: YES. MARKDOWN_SUPPORT = YES # When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up # to that level are automatically included in the table of contents, even if # they do not have an id attribute. # Note: This feature currently applies only to Markdown headings. # Minimum value: 0, maximum value: 99, default value: 0. # This tag requires that the tag MARKDOWN_SUPPORT is set to YES. TOC_INCLUDE_HEADINGS = 0 # When enabled doxygen tries to link words that correspond to documented # classes, or namespaces to their corresponding documentation. Such a link can # be prevented in individual cases by putting a % sign in front of the word or # globally by setting AUTOLINK_SUPPORT to NO. # The default value is: YES. AUTOLINK_SUPPORT = YES # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want # to include (a tag file for) the STL sources as input, then you should set this # tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); # versus func(std::string) {}). This also make the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. # The default value is: NO. BUILTIN_STL_SUPPORT = NO # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. 
# The default value is: NO. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip (see: # http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen # will parse them like normal C++ but will assume all classes use public instead # of private inheritance when no explicit protection keyword is present. # The default value is: NO. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate # getter and setter methods for a property. Setting this option to YES will make # doxygen to replace the get and set methods by a property in the documentation. # This will only work if the methods are indeed getting or setting a simple # type. If this is not the case, or you want to show the methods anyway, you # should set this option to NO. # The default value is: YES. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. # The default value is: NO. DISTRIBUTE_GROUP_DOC = NO # If one adds a struct or class to a group and this option is enabled, then also # any nested class or struct is added to the same group. By default this option # is disabled and one has to add nested compounds explicitly via \ingroup. # The default value is: NO. GROUP_NESTED_COMPOUNDS = NO # Set the SUBGROUPING tag to YES to allow class member groups of the same type # (for instance a group of public functions) to be put as a subgroup of that # type (e.g. under the Public Functions section). Set it to NO to prevent # subgrouping. Alternatively, this can be done per class using the # \nosubgrouping command. # The default value is: YES. SUBGROUPING = YES # When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions # are shown inside the group in which they are included (e.g. using \ingroup) # instead of on a separate page (for HTML and Man pages) or section (for LaTeX # and RTF). # # Note that this feature does not work in combination with # SEPARATE_MEMBER_PAGES. # The default value is: NO. INLINE_GROUPED_CLASSES = NO # When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions # with only public data fields or simple typedef fields will be shown inline in # the documentation of the scope in which they are defined (i.e. file, # namespace, or group documentation), provided this scope is documented. If set # to NO, structs, classes, and unions are shown on a separate page (for HTML and # Man pages) or section (for LaTeX and RTF). # The default value is: NO. INLINE_SIMPLE_STRUCTS = NO # When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or # enum is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically be # useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. # The default value is: NO. TYPEDEF_HIDES_STRUCT = NO # The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This # cache is used to resolve symbols given their name and scope. 
Since this can be # an expensive process and often the same symbol appears multiple times in the # code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small # doxygen will become slower. If the cache is too large, memory is wasted. The # cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range # is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 # symbols. At the end of a run doxygen will report the cache usage and suggest # the optimal cache size from a speed point of view. # Minimum value: 0, maximum value: 9, default value: 0. LOOKUP_CACHE_SIZE = 0 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in # documentation are documented, even if no documentation was available. Private # class members and static file members will be hidden unless the # EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. # Note: This will also disable the warnings about undocumented members that are # normally produced when WARNINGS is set to YES. # The default value is: NO. EXTRACT_ALL = NO # If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will # be included in the documentation. # The default value is: NO. EXTRACT_PRIVATE = NO # If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal # scope will be included in the documentation. # The default value is: NO. EXTRACT_PACKAGE = NO # If the EXTRACT_STATIC tag is set to YES, all static members of a file will be # included in the documentation. # The default value is: NO. EXTRACT_STATIC = NO # If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined # locally in source files will be included in the documentation. If set to NO, # only classes defined in header files are included. Does not have any effect # for Java sources. # The default value is: YES. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. If set to YES, local methods, # which are defined in the implementation section but not in the interface are # included in the documentation. If set to NO, only methods in the interface are # included. # The default value is: NO. EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base name of # the file that contains the anonymous namespace. By default anonymous namespace # are hidden. # The default value is: NO. EXTRACT_ANON_NSPACES = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all # undocumented members inside documented classes or files. If set to NO these # members will be included in the various overviews, but no documentation # section is generated. This option has no effect if EXTRACT_ALL is enabled. # The default value is: NO. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. If set # to NO, these classes will be included in the various overviews. This option # has no effect if EXTRACT_ALL is enabled. # The default value is: NO. 
HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend # (class|struct|union) declarations. If set to NO, these declarations will be # included in the documentation. # The default value is: NO. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any # documentation blocks found inside the body of a function. If set to NO, these # blocks will be appended to the function's detailed documentation block. # The default value is: NO. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation that is typed after a # \internal command is included. If the tag is set to NO then the documentation # will be excluded. Set it to YES to include the internal documentation. # The default value is: NO. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file # names in lower-case letters. If set to YES, upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. # The default value is: system dependent. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with # their full class and namespace scopes in the documentation. If set to YES, the # scope will be hidden. # The default value is: NO. HIDE_SCOPE_NAMES = NO # If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will # append additional text to a page's title, such as Class Reference. If set to # YES the compound reference will be hidden. # The default value is: NO. HIDE_COMPOUND_REFERENCE= NO # If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of # the files that are included by a file in the documentation of that file. # The default value is: YES. SHOW_INCLUDE_FILES = YES # If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each # grouped member an include statement to the documentation, telling the reader # which file to include in order to use the member. # The default value is: NO. SHOW_GROUPED_MEMB_INC = NO # If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include # files with double quotes in the documentation rather than with sharp brackets. # The default value is: NO. FORCE_LOCAL_INCLUDES = NO # If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the # documentation for inline members. # The default value is: YES. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the # (detailed) documentation of file and class members alphabetically by member # name. If set to NO, the members will appear in declaration order. # The default value is: YES. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief # descriptions of file, namespace and class members alphabetically by member # name. If set to NO, the members will appear in declaration order. Note that # this will also influence the order of the classes in the class list. # The default value is: NO. SORT_BRIEF_DOCS = NO # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the # (brief and detailed) documentation of class members so that constructors and # destructors are listed first. If set to NO the constructors will appear in the # respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. 
# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief # member documentation. # Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting # detailed member documentation. # The default value is: NO. SORT_MEMBERS_CTORS_1ST = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy # of group names into alphabetical order. If set to NO the group names will # appear in their defined order. # The default value is: NO. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by # fully-qualified names, including namespaces. If set to NO, the class list will # be sorted only by class name, not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the alphabetical # list. # The default value is: NO. SORT_BY_SCOPE_NAME = NO # If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper # type resolution of all parameters of a function it will reject a match between # the prototype and the implementation of a member function even if there is # only one candidate or it is obvious which candidate to choose by doing a # simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still # accept a match between prototype and implementation in such cases. # The default value is: NO. STRICT_PROTO_MATCHING = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo # list. This list is created by putting \todo commands in the documentation. # The default value is: YES. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test # list. This list is created by putting \test commands in the documentation. # The default value is: YES. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug # list. This list is created by putting \bug commands in the documentation. # The default value is: YES. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) # the deprecated list. This list is created by putting \deprecated commands in # the documentation. # The default value is: YES. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional documentation # sections, marked by \if ... \endif and \cond # ... \endcond blocks. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the # initial value of a variable or macro / define can have for it to appear in the # documentation. If the initializer consists of more lines than specified here # it will be hidden. Use a value of 0 to hide initializers completely. The # appearance of the value of individual variables and macros / defines can be # controlled using \showinitializer or \hideinitializer command in the # documentation regardless of this setting. # Minimum value: 0, maximum value: 10000, default value: 30. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated at # the bottom of the documentation of classes and structs. If set to YES, the # list will mention the files that were used to generate the documentation. # The default value is: YES. SHOW_USED_FILES = YES # Set the SHOW_FILES tag to NO to disable the generation of the Files page. This # will remove the Files entry from the Quick Index and from the Folder Tree View # (if specified). 
# The default value is: YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces # page. This will remove the Namespaces entry from the Quick Index and from the # Folder Tree View (if specified). # The default value is: YES. SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command command input-file, where command is the value of the # FILE_VERSION_FILTER tag, and input-file is the name of an input file provided # by doxygen. Whatever the program writes to standard output is used as the file # version. For an example see the documentation. FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed # by doxygen. The layout file controls the global structure of the generated # output files in an output format independent way. To create the layout file # that represents doxygen's defaults, run doxygen with the -l option. You can # optionally specify a file name after the option, if omitted DoxygenLayout.xml # will be used as the name of the layout file. # # Note that if you run doxygen from a directory containing a file called # DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE # tag is left empty. LAYOUT_FILE = # The CITE_BIB_FILES tag can be used to specify one or more bib files containing # the reference definitions. This must be a list of .bib files. The .bib # extension is automatically appended if omitted. This requires the bibtex tool # to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. # For LaTeX the style of the bibliography can be controlled using # LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the # search path. See also \cite for info how to create references. CITE_BIB_FILES = #--------------------------------------------------------------------------- # Configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated to # standard output by doxygen. If QUIET is set to YES this implies that the # messages are off. # The default value is: NO. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated to standard error (stderr) by doxygen. If WARNINGS is set to YES # this implies that the warnings are on. # # Tip: Turn warnings on while writing the documentation. # The default value is: YES. WARNINGS = YES # If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate # warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag # will automatically be disabled. # The default value is: YES. WARN_IF_UNDOCUMENTED = YES # If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some parameters # in a documented function, or documenting parameters that don't exist or using # markup commands wrongly. # The default value is: YES. WARN_IF_DOC_ERROR = YES # This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that # are documented, but have no documentation for their parameters or return # value. 
If set to NO, doxygen will only warn about wrong or incomplete # parameter documentation, but not about the absence of documentation. # The default value is: NO. WARN_NO_PARAMDOC = NO # If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when # a warning is encountered. # The default value is: NO. WARN_AS_ERROR = YES # The WARN_FORMAT tag determines the format of the warning messages that doxygen # can produce. The string should contain the $file, $line, and $text tags, which # will be replaced by the file and line number from which the warning originated # and the warning text. Optionally the format may contain $version, which will # be replaced by the version of the file (if it could be obtained via # FILE_VERSION_FILTER) # The default value is: $file:$line: $text. WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning and error # messages should be written. If left blank the output is written to standard # error (stderr). WARN_LOGFILE = #--------------------------------------------------------------------------- # Configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag is used to specify the files and/or directories that contain # documented source files. You may enter file names like myfile.cpp or # directories like /usr/src/myproject. Separate the files or directories with # spaces. See also FILE_PATTERNS and EXTENSION_MAPPING # Note: If this tag is empty the current directory is searched. INPUT = ../../cpp/basix index.md # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses # libiconv (or the iconv built into libc) for the transcoding. See the libiconv # documentation (see: http://www.gnu.org/software/libiconv) for the list of # possible encodings. # The default value is: UTF-8. INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and # *.h) to filter out the source-files in the directories. # # Note that for custom extensions or not directly supported extensions you also # need to set EXTENSION_MAPPING for the extension otherwise the files are not # read by doxygen. # # If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, # *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, # *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, # *.m, *.markdown, *.md, *.mm, *.dox, *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, # *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf and *.qsf. FILE_PATTERNS = *.cpp *.h *.md # The RECURSIVE tag can be used to specify whether or not subdirectories should # be searched for input files as well. # The default value is: NO. RECURSIVE = YES # The EXCLUDE tag can be used to specify files and/or directories that should be # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. # # Note that relative paths are relative to the directory from which doxygen is # run. EXCLUDE = ../../cpp/basix/loguru.hpp \ ../../cpp/basix/loguru.cpp \ ../../cpp/basix/span.hpp # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix file system feature) are excluded # from the input. 
# The default value is: NO. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. # # Note that the wildcards are matched against the file with absolute path, so to # exclude all test directories for example use the pattern */test/* EXCLUDE_PATTERNS = # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test # # Note that the wildcards are matched against the file with absolute path, so to # exclude all test directories use the pattern */test/* EXCLUDE_SYMBOLS = # The EXAMPLE_PATH tag can be used to specify one or more files or directories # that contain example code fragments that are included (see the \include # command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and # *.h) to filter out the source-files in the directories. If left blank all # files are included. EXAMPLE_PATTERNS = * # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude commands # irrespective of the value of the RECURSIVE tag. # The default value is: NO. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or directories # that contain images that are to be included in the documentation (see the # \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command: # # <filter> <input-file> # # where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the # name of an input file. Doxygen will then use the output that the filter # program writes to standard output. If FILTER_PATTERNS is specified, this tag # will be ignored. # # Note that the filter must not add or remove lines; it is applied before the # code is scanned, but not when the output code is generated. If lines are added # or removed, the anchors will not be placed correctly. # # Note that for custom extensions or not directly supported extensions you also # need to set EXTENSION_MAPPING for the extension otherwise the files are not # properly processed by doxygen. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. Doxygen will compare the file name with each pattern and apply the # filter if there is a match. The filters are a list of the form: pattern=filter # (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how # filters are used. If the FILTER_PATTERNS tag is empty or if none of the # patterns match the file name, INPUT_FILTER is applied. # # Note that for custom extensions or not directly supported extensions you also # need to set EXTENSION_MAPPING for the extension otherwise the files are not # properly processed by doxygen. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will also be used to filter the input files that are used for # producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
# The default value is: NO. FILTER_SOURCE_FILES = NO # The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file # pattern. A pattern will override the setting for FILTER_PATTERN (if any) and # it is also possible to disable source filtering for a specific pattern using # *.ext= (so without naming a filter). # This tag requires that the tag FILTER_SOURCE_FILES is set to YES. FILTER_SOURCE_PATTERNS = # If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that # is part of the input, its contents will be placed on the main page # (index.html). This can be useful if you have a project on for instance GitHub # and want to reuse the introduction page also for the doxygen output. USE_MDFILE_AS_MAINPAGE = index.md #--------------------------------------------------------------------------- # Configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will be # generated. Documented entities will be cross-referenced with these sources. # # Note: To get rid of all source code in the generated output, make sure that # also VERBATIM_HEADERS is set to NO. # The default value is: NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body of functions, # classes and enums directly into the documentation. # The default value is: NO. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any # special comment blocks from generated source code fragments. Normal C, C++ and # Fortran comments will always remain visible. # The default value is: YES. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES then for each documented # function all documented functions referencing it will be listed. # The default value is: NO. REFERENCED_BY_RELATION = NO # If the REFERENCES_RELATION tag is set to YES then for each documented function # all documented entities called/used by that function will be listed. # The default value is: NO. REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set # to YES then the hyperlinks from functions in REFERENCES_RELATION and # REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will # link to the documentation. # The default value is: YES. REFERENCES_LINK_SOURCE = YES # If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the # source code will show a tooltip with additional information such as prototype, # brief description and links to the definition and documentation. Since this # will make the HTML file larger and loading of large files a bit slower, you # can opt to disable this feature. # The default value is: YES. # This tag requires that the tag SOURCE_BROWSER is set to YES. SOURCE_TOOLTIPS = YES # If the USE_HTAGS tag is set to YES then the references to source code will # point to the HTML generated by the htags(1) tool instead of doxygen built-in # source browser. The htags tool is part of GNU's global source tagging system # (see http://www.gnu.org/software/global/global.html). You will need version # 4.8.6 or higher. 
# # To use it do the following: # - Install the latest version of global # - Enable SOURCE_BROWSER and USE_HTAGS in the config file # - Make sure the INPUT points to the root of the source tree # - Run doxygen as normal # # Doxygen will invoke htags (and that will in turn invoke gtags), so these # tools must be available from the command line (i.e. in the search path). # # The result: instead of the source browser generated by doxygen, the links to # source code will now point to the output of htags. # The default value is: NO. # This tag requires that the tag SOURCE_BROWSER is set to YES. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a # verbatim copy of the header file for each class for which an include is # specified. Set to NO to disable this. # See also: Section \class. # The default value is: YES. VERBATIM_HEADERS = YES # If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the # clang parser (see: http://clang.llvm.org/) for more accurate parsing at the # cost of reduced performance. This can be particularly helpful with template # rich C++ code for which doxygen's built-in parser lacks the necessary type # information. # Note: The availability of this option depends on whether or not doxygen was # generated with the -Duse-libclang=ON option for CMake. # The default value is: NO. CLANG_ASSISTED_PARSING = NO # If clang assisted parsing is enabled you can provide the compiler with command # line options that you would normally use when invoking the compiler. Note that # the include paths will already be set by doxygen for the files and directories # specified with INPUT and INCLUDE_PATH. # This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES. CLANG_OPTIONS = #--------------------------------------------------------------------------- # Configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all # compounds will be generated. Enable this if the project contains a lot of # classes, structs, unions or interfaces. # The default value is: YES. ALPHABETICAL_INDEX = YES # The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in # which the alphabetical index list will be split. # Minimum value: 1, maximum value: 20, default value: 5. # This tag requires that the tag ALPHABETICAL_INDEX is set to YES. COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all classes will # be put under the same header in the alphabetical index. The IGNORE_PREFIX tag # can be used to specify a prefix (or a list of prefixes) that should be ignored # while generating the index headers. # This tag requires that the tag ALPHABETICAL_INDEX is set to YES. IGNORE_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output # The default value is: YES. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a # relative path is entered the value of OUTPUT_DIRECTORY will be put in front of # it. # The default directory is: html. # This tag requires that the tag GENERATE_HTML is set to YES.
HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for each # generated HTML page (for example: .htm, .php, .asp). # The default value is: .html. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a user-defined HTML header file for # each generated HTML page. If the tag is left blank doxygen will generate a # standard header. # # To get valid HTML the header file that includes any scripts and style sheets # that doxygen needs, which is dependent on the configuration options used (e.g. # the setting GENERATE_TREEVIEW). It is highly recommended to start with a # default header using # doxygen -w html new_header.html new_footer.html new_stylesheet.css # YourConfigFile # and then modify the file new_header.html. See also section "Doxygen usage" # for information on how to generate the default header that doxygen normally # uses. # Note: The header is subject to change so you typically have to regenerate the # default header when upgrading to a newer version of doxygen. For a description # of the possible markers and block names see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_HEADER = header.html # The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each # generated HTML page. If the tag is left blank doxygen will generate a standard # footer. See HTML_HEADER for more information on how to generate a default # footer and what special commands can be used inside the footer. See also # section "Doxygen usage" for information on how to generate the default footer # that doxygen normally uses. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading style # sheet that is used by each HTML page. It can be used to fine-tune the look of # the HTML output. If left blank doxygen will generate a default style sheet. # See also section "Doxygen usage" for information on how to generate the style # sheet that doxygen normally uses. # Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as # it is more robust and this tag (HTML_STYLESHEET) will in the future become # obsolete. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_STYLESHEET = # The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined # cascading style sheets that are included after the standard style sheets # created by doxygen. Using this option one can overrule certain style aspects. # This is preferred over using HTML_STYLESHEET since it does not replace the # standard style sheet and is therefore more robust against future updates. # Doxygen will copy the style sheet files to the output directory. # Note: The order of the extra style sheet files is of importance (e.g. the last # style sheet in the list overrules the setting of the previous ones in the # list). For an example see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_EXTRA_STYLESHEET = # The HTML_EXTRA_FILES tag can be used to specify one or more extra images or # other source files which should be copied to the HTML output directory. Note # that these files will be copied to the base HTML output directory. Use the # $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these # files. In the HTML_STYLESHEET file, use the file name only. 
Also note that the # files will be copied as-is; there are no commands or markers available. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_EXTRA_FILES = # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen # will adjust the colors in the style sheet and background images according to # this color. Hue is specified as an angle on a colorwheel, see # http://en.wikipedia.org/wiki/Hue for more information. For instance the value # 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 # purple, and 360 is red again. # Minimum value: 0, maximum value: 359, default value: 220. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_COLORSTYLE_HUE = 220 # The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors # in the HTML output. For a value of 0 the output will use grayscales only. A # value of 255 will produce the most vivid colors. # Minimum value: 0, maximum value: 255, default value: 100. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_COLORSTYLE_SAT = 100 # The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the # luminance component of the colors in the HTML output. Values below 100 # gradually make the output lighter, whereas values above 100 make the output # darker. The value divided by 100 is the actual gamma applied, so 80 represents # a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not # change the gamma. # Minimum value: 40, maximum value: 240, default value: 80. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_COLORSTYLE_GAMMA = 80 # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML # page will contain the date and time when the page was generated. Setting this # to YES can help to show when doxygen was last run and thus if the # documentation is up to date. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_TIMESTAMP = NO # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_DYNAMIC_SECTIONS = NO # With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries # shown in the various tree structured indices initially; the user can expand # and collapse entries dynamically later on. Doxygen will expand the tree to # such a level that at most the specified number of entries are visible (unless # a fully collapsed tree already exceeds this amount). So setting the number of # entries 1 will produce a full collapsed tree by default. 0 is a special value # representing an infinite number of entries and will result in a full expanded # tree by default. # Minimum value: 0, maximum value: 9999, default value: 100. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_INDEX_NUM_ENTRIES = 100 # If the GENERATE_DOCSET tag is set to YES, additional index files will be # generated that can be used as input for Apple's Xcode 3 integrated development # environment (see: http://developer.apple.com/tools/xcode/), introduced with # OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a # Makefile in the HTML output directory. 
Running make will produce the docset in # that directory and running make install will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at # startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html # for more information. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_DOCSET = NO # This tag determines the name of the docset feed. A documentation feed provides # an umbrella under which multiple documentation sets from a single provider # (such as a company or product suite) can be grouped. # The default value is: Doxygen generated docs. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_FEEDNAME = "Doxygen generated docs" # This tag specifies a string that should uniquely identify the documentation # set bundle. This should be a reverse domain-name style string, e.g. # com.mycompany.MyDocSet. Doxygen will append .docset to the name. # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_BUNDLE_ID = org.doxygen.Project # The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify # the documentation publisher. This should be a reverse domain-name style # string, e.g. com.mycompany.MyDocSet.documentation. # The default value is: org.doxygen.Publisher. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_PUBLISHER_ID = org.doxygen.Publisher # The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. # The default value is: Publisher. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_PUBLISHER_NAME = Publisher # If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three # additional HTML index files: index.hhp, index.hhc, and index.hhk. The # index.hhp is a project file that can be read by Microsoft's HTML Help Workshop # (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on # Windows. # # The HTML Help Workshop contains a compiler that can convert all HTML output # generated by doxygen into a single compiled HTML file (.chm). Compiled HTML # files are now used as the Windows 98 help format, and will replace the old # Windows help format (.hlp) on all Windows platforms in the future. Compressed # HTML files also contain an index, a table of contents, and you can search for # words in the documentation. The HTML workshop also contains a viewer for # compressed HTML files. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_HTMLHELP = NO # The CHM_FILE tag can be used to specify the file name of the resulting .chm # file. You can add a path in front of the file if the result should not be # written to the html output directory. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. CHM_FILE = # The HHC_LOCATION tag can be used to specify the location (absolute path # including file name) of the HTML help compiler (hhc.exe). If non-empty, # doxygen will try to run the HTML help compiler on the generated index.hhp. # The file has to be specified with full path. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. HHC_LOCATION = # The GENERATE_CHI flag controls if a separate .chi index file is generated # (YES) or that it should be included in the master .chm file (NO). # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. 
GENERATE_CHI = NO # The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) # and project file content. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. CHM_INDEX_ENCODING = # The BINARY_TOC flag controls whether a binary table of contents is generated # (YES) or a normal table of contents (NO) in the .chm file. Furthermore it # enables the Previous and Next buttons. # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members to # the table of contents of the HTML help documentation and to the tree view. # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. TOC_EXPAND = NO # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and # QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that # can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help # (.qch) of the generated HTML documentation. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify # the file name of the resulting .qch file. The path specified is relative to # the HTML output folder. # This tag requires that the tag GENERATE_QHP is set to YES. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help # Project output. For more information please see Qt Help Project / Namespace # (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace). # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_QHP is set to YES. QHP_NAMESPACE = org.doxygen.Project # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt # Help Project output. For more information please see Qt Help Project / Virtual # Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual- # folders). # The default value is: doc. # This tag requires that the tag GENERATE_QHP is set to YES. QHP_VIRTUAL_FOLDER = doc # If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom # filter to add. For more information please see Qt Help Project / Custom # Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- # filters). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_NAME = # The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the # custom filter to add. For more information please see Qt Help Project / Custom # Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- # filters). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this # project's filter section matches. Qt Help Project / Filter Attributes (see: # http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_SECT_FILTER_ATTRS = # The QHG_LOCATION tag can be used to specify the location of Qt's # qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the # generated .qhp file. # This tag requires that the tag GENERATE_QHP is set to YES. QHG_LOCATION = # If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be # generated, together with the HTML files, they form an Eclipse help plugin. 
To # install this plugin and make it available under the help contents menu in # Eclipse, the contents of the directory containing the HTML and XML files needs # to be copied into the plugins directory of eclipse. The name of the directory # within the plugins directory should be the same as the ECLIPSE_DOC_ID value. # After copying Eclipse needs to be restarted before the help appears. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_ECLIPSEHELP = NO # A unique identifier for the Eclipse help plugin. When installing the plugin # the directory name containing the HTML and XML files should also have this # name. Each documentation set should have its own identifier. # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. ECLIPSE_DOC_ID = org.doxygen.Project # If you want full control over the layout of the generated HTML pages it might # be necessary to disable the index and replace it with your own. The # DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top # of each HTML page. A value of NO enables the index and the value YES disables # it. Since the tabs in the index contain the same information as the navigation # tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. DISABLE_INDEX = NO # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. If the tag # value is set to YES, a side panel will be generated containing a tree-like # index structure (just like the one that is generated for HTML Help). For this # to work a browser that supports JavaScript, DHTML, CSS and frames is required # (i.e. any modern browser). Windows users are probably better off using the # HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can # further fine-tune the look of the index. As an example, the default style # sheet generated by doxygen has an example that shows how to put an image at # the root of the tree instead of the PROJECT_NAME. Since the tree basically has # the same information as the tab index, you could consider setting # DISABLE_INDEX to YES when enabling this option. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_TREEVIEW = NO # The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that # doxygen will group on one line in the generated HTML documentation. # # Note that a value of 0 will completely suppress the enum values from appearing # in the overview section. # Minimum value: 0, maximum value: 20, default value: 4. # This tag requires that the tag GENERATE_HTML is set to YES. ENUM_VALUES_PER_LINE = 4 # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used # to set the initial width (in pixels) of the frame in which the tree is shown. # Minimum value: 0, maximum value: 1500, default value: 250. # This tag requires that the tag GENERATE_HTML is set to YES. TREEVIEW_WIDTH = 250 # If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to # external symbols imported via tag files in a separate window. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. EXT_LINKS_IN_WINDOW = NO # Use this tag to change the font size of LaTeX formulas included as images in # the HTML documentation. 
When you change the font size after a successful # doxygen run you need to manually remove any form_*.png images from the HTML # output directory to force them to be regenerated. # Minimum value: 8, maximum value: 50, default value: 10. # This tag requires that the tag GENERATE_HTML is set to YES. FORMULA_FONTSIZE = 10 # Use the FORMULA_TRANSPARENT tag to determine whether or not the images # generated for formulas are transparent PNGs. Transparent PNGs are not # supported properly for IE 6.0, but are supported on all modern browsers. # # Note that when changing this option you need to delete any form_*.png files in # the HTML output directory before the changes have effect. # The default value is: YES. # This tag requires that the tag GENERATE_HTML is set to YES. FORMULA_TRANSPARENT = YES # Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see # http://www.mathjax.org) which uses client side Javascript for the rendering # instead of using pre-rendered bitmaps. Use this if you do not have LaTeX # installed or if you want formulas to look prettier in the HTML output. When # enabled you may also need to install MathJax separately and configure the path # to it using the MATHJAX_RELPATH option. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. USE_MATHJAX = YES # When MathJax is enabled you can set the default output format to be used for # the MathJax output. See the MathJax site (see: # http://docs.mathjax.org/en/latest/output.html) for more details. # Possible values are: HTML-CSS (which is slower, but has the best # compatibility), NativeMML (i.e. MathML) and SVG. # The default value is: HTML-CSS. # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_FORMAT = HTML-CSS # When MathJax is enabled you need to specify the location relative to the HTML # output directory using the MATHJAX_RELPATH option. The destination directory # should contain the MathJax.js script. For instance, if the mathjax directory # is located at the same level as the HTML output directory, then # MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax # Content Delivery Network so you can quickly see the result without installing # MathJax. However, it is strongly recommended to install a local copy of # MathJax from http://www.mathjax.org before deployment. # The default value is: http://cdn.mathjax.org/mathjax/latest. # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_RELPATH = https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1 # The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax # extension names that should be enabled during MathJax rendering. For example # MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_EXTENSIONS = # The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces # of code that will be used on startup of the MathJax code. See the MathJax site # (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an # example see the documentation. # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_CODEFILE = # When the SEARCHENGINE tag is enabled doxygen will generate a search box for # the HTML output. The underlying search engine uses javascript and DHTML and # should work on any modern browser.
Note that when using HTML help # (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) # there is already a search function so this one should typically be disabled. # For large projects the javascript based search engine can be slow, then # enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to # search using the keyboard; to jump to the search box use <access key> + S # (what the <access key> is depends on the OS and browser, but it is typically # <CTRL>, <ALT>/