pax_global_header 0000666 0000000 0000000 00000000064 14562440725 0014523 g ustar 00root root 0000000 0000000 52 comment=68d87db5f26158383ca5c7fe001bc64d1d816471
.codecov.yml 0000664 0000000 0000000 00000000014 14562440725 0013305 0 ustar 00root root 0000000 0000000 comment: no
.flake8 0000664 0000000 0000000 00000000167 14562440725 0012246 0 ustar 00root root 0000000 0000000 [flake8]
ignore = E203, E266, E501, W503, C901, E741
max-line-length = 88
max-complexity = 18
select = B,C,E,F,W,T4,B9
.gitattributes 0000664 0000000 0000000 00000001375 14562440725 0013770 0 ustar 00root root 0000000 0000000 *.bin filter=lfs diff=lfs merge=lfs -text
*.f3grid filter=lfs diff=lfs merge=lfs -text
*.fem filter=lfs diff=lfs merge=lfs -text
*.inp filter=lfs diff=lfs merge=lfs -text
*.med filter=lfs diff=lfs merge=lfs -text
*.msh filter=lfs diff=lfs merge=lfs -text
*.obj filter=lfs diff=lfs merge=lfs -text
*.ply filter=lfs diff=lfs merge=lfs -text
*.vtk filter=lfs diff=lfs merge=lfs -text
*.vtu filter=lfs diff=lfs merge=lfs -text
*.ugrid filter=lfs diff=lfs merge=lfs -text
*.mesh filter=lfs diff=lfs merge=lfs -text
*.meshb filter=lfs diff=lfs merge=lfs -text
*.tec filter=lfs diff=lfs merge=lfs -text
*.su2 filter=lfs diff=lfs merge=lfs -text
*.ele filter=lfs diff=lfs merge=lfs -text
*.node filter=lfs diff=lfs merge=lfs -text
*.vol filter=lfs diff=lfs merge=lfs -text
.github/ 0000775 0000000 0000000 00000000000 14562440725 0012427 5 ustar 00root root 0000000 0000000 .github/ISSUE_TEMPLATE/ 0000775 0000000 0000000 00000000000 14562440725 0014612 5 ustar 00root root 0000000 0000000 .github/ISSUE_TEMPLATE/bug_report.md 0000664 0000000 0000000 00000001217 14562440725 0017305 0 ustar 00root root 0000000 0000000 ---
name: Bug report
about: Create a report to help us improve
title: "[BUG]"
labels: Needs triage
assignees: ""
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
A minimal code example that reproduces the problem would be a big help if you can provide it. Attach a small mesh file if necessary.
**Diagnose**
I may ask you to cut and paste the output of the following command.
```
pip freeze | grep meshio
```
**Did I help?**
If I was able to resolve your problem, consider [sponsoring](https://github.com/sponsors/nschloe) my work on meshio, or [buy me a coffee](https://ko-fi.com/nschloe) to say thanks.
.github/ISSUE_TEMPLATE/feature_request.md 0000664 0000000 0000000 00000001314 14562440725 0020336 0 ustar 00root root 0000000 0000000 ---
name: Feature request
about: Suggest an idea for this project
title: "[REQUEST]"
labels: Needs triage
assignees: ''
---
Consider posting in https://github.com/nschloe/meshio/discussions for feedback before raising a feature request.
**How would you improve meshio?**
Give as much detail as you can. Example code of how you would like it to work would help.
**What problem does it solved for you?**
What problem do you have that this feature would solve? I may be able to suggest an existing way of solving it.
**Did I help**
If I was able to resolve your problem, consider [sponsoring](https://github.com/sponsors/nschloe) my work on meshio, or [buy me a coffee](https://ko-fi.com/nschloe) to say thanks.
.github/ISSUE_TEMPLATE/new_format.md 0000664 0000000 0000000 00000001375 14562440725 0017303 0 ustar 00root root 0000000 0000000 ---
name: New Format
about: Suggest support for a new mesh format
labels: new format
assignees: ''
---
Would you like support for a new mesh format in meshio? First check the existing issues, there are a number of format requests already. It's often easy to get rudimentary support for a format, so don't be afraid to start a PR!
Consider posting in https://github.com/nschloe/meshio/discussions for feedback before raising a feature request.
**Format specification?**
Most formats have a detailed specification somewhere online. Make sure to post a link to that.
**Did I help**
If I was able to resolve your problem, consider [sponsoring](https://github.com/sponsors/nschloe) my work on meshio, or [buy me a coffee](https://ko-fi.com/nschloe) to say thanks.
.github/workflows/ 0000775 0000000 0000000 00000000000 14562440725 0014464 5 ustar 00root root 0000000 0000000 .github/workflows/ci.yml 0000664 0000000 0000000 00000001725 14562440725 0015607 0 ustar 00root root 0000000 0000000 name: ci
on:
push:
branches:
- main
pull_request:
branches:
- main
jobs:
lint:
runs-on: ubuntu-latest
steps:
- name: Check out repo
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
- name: Run pre-commit
uses: pre-commit/action@v3.0.0
build:
needs: [lint]
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-latest, windows-latest, macOS-latest]
python-version: ["3.8", "3.12"]
steps:
- uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Checkout code
uses: nschloe/action-cached-lfs-checkout@v1
- name: Test with tox
run: |
pip install tox
tox -- --cov meshio --cov-report xml --cov-report term
- uses: codecov/codecov-action@v3
if: ${{ matrix.python-version == '3.10' && matrix.os == 'ubuntu-latest' }}
.gitignore 0000664 0000000 0000000 00000000305 14562440725 0013055 0 ustar 00root root 0000000 0000000 *.bin
*.dat
*.dato
*.e
*.geo
*.h5
*.h5m
*.msh
*.off
*.pvtu
*.pyc
*.xdmf
*.xmf
*.xml
.cache/
MANIFEST
README.rst
build/
dist/
doc/_build/
*.egg-info/
.pytest_cache/
.coverage
.tox/
foo.vtk
.vscode/
.pre-commit-config.yaml 0000664 0000000 0000000 00000000454 14562440725 0015353 0 ustar 00root root 0000000 0000000 repos:
- repo: https://github.com/PyCQA/isort
rev: 5.13.2
hooks:
- id: isort
- repo: https://github.com/psf/black
rev: 24.1.1
hooks:
- id: black
language_version: python3
- repo: https://github.com/PyCQA/flake8
rev: 7.0.0
hooks:
- id: flake8
CHANGELOG.md 0000664 0000000 0000000 00000004110 14562440725 0012674 0 ustar 00root root 0000000 0000000
# Changelog
This document only describes _breaking_ changes in meshio. If you are interested in bug
fixes, enhancements etc., best follow [the meshio project on
GitHub](https://github.com/nschloe/meshio).
## v5.1.0 (Dec 11, 2021)
- CellBlocks are no longer tuples, but classes. You can no longer iterate over them like
```python
for cell_type, cell_data in cells:
pass
```
Instead, use
```python
for cell_block in cells:
cell_block.type
cell_block.data
```
## v5.0.0 (Aug 06, 2021)
- meshio now only provides one command-line tool, `meshio`, with subcommands like
`info`, `convert`, etc. This replaces the former `meshio-info`, `meshio-convert` etc.
## v4.4.0 (Apr 29, 2021)
- Polygons are now stored as `"polygon"` cell blocks, not `"polygonN"` (where `N` is the
number of nodes per polygon). One can simply retrieve the number of points via
`cellblock.data.shape[1]`.
## v4.0.0 (Feb 18, 2020)
- `mesh.cells` used to be a dictionary of the form
```python
{
"triangle": [[0, 1, 2], [0, 2, 3]],
"quad": [[0, 7, 1, 10], ...]
}
```
From 4.0.0 on, `mesh.cells` is a list of tuples,
```python
[
("triangle", [[0, 1, 2], [0, 2, 3]]),
("quad", [[0, 7, 1, 10], ...])
]
```
This has the advantage that multiple blocks of the same cell type can be accounted
for. Also, cell ordering can be preserved.
You can now use the method `mesh.get_cells_type("triangle")` to get all cells of
`"triangle"` type, or use `mesh.cells_dict` to build the old dictionary structure.
- `mesh.cell_data` used to be a dictionary of the form
```python
{
"triangle": {"a": [0.5, 1.3], "b": [2.17, 41.3]},
"quad": {"a": [1.1, -0.3, ...], "b": [3.14, 1.61, ...]},
}
```
From 4.0.0 on, `mesh.cell_data` is a dictionary of lists,
```python
{
"a": [[0.5, 1.3], [1.1, -0.3, ...]],
"b": [[2.17, 41.3], [3.14, 1.61, ...]],
}
```
Each data list, e.g., `mesh.cell_data["a"]`, can be `zip`ped with `mesh.cells`.
An old-style `cell_data` dictionary can be retrieved via `mesh.cell_data_dict`.
CITATION.cff 0000664 0000000 0000000 00000000463 14562440725 0012764 0 ustar 00root root 0000000 0000000 cff-version: 1.2.0
message: "If you use this software, please cite it as below."
authors:
- family-names: "Schlömer"
given-names: "Nico"
orcid: "https://orcid.org/0000-0001-5228-0946"
title: "meshio: Tools for mesh files"
doi: 10.5281/zenodo.1173115
url: https://github.com/nschloe/meshio
license: MIT
CODE_OF_CONDUCT.md 0000664 0000000 0000000 00000006441 14562440725 0013673 0 ustar 00root root 0000000 0000000 # meshio Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies within all project spaces, and it also applies when
an individual is representing the project or its community in public spaces.
Examples of representing a project or community include using an official
project e-mail address, posting via an official social media account, or acting
as an appointed representative at an online or offline event. Representation of
a project may be further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at . All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct/
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq/
CONTRIBUTING.md 0000664 0000000 0000000 00000001433 14562440725 0013321 0 ustar 00root root 0000000 0000000 # meshio contributing guidelines
The meshio community appreciates your contributions via issues and
pull requests. Note that the [code of conduct](CODE_OF_CONDUCT.md)
applies to all interactions with the meshio project, including
issues and pull requests.
When submitting pull requests, please follow the style guidelines of
the project, ensure that your code is tested and documented, and write
good commit messages, e.g., following [these
guidelines](https://chris.beams.io/posts/git-commit/).
By submitting a pull request, you are licensing your code under the
project [license](LICENSE.txt) and affirming that you either own copyright
(automatic for most individuals) or are authorized to distribute under
the project license (e.g., in case your employer retains copyright on
your work).
LICENSE.txt 0000664 0000000 0000000 00000002105 14562440725 0012710 0 ustar 00root root 0000000 0000000 The MIT License (MIT)
Copyright (c) 2015-2021 Nico Schlömer et al.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
MANIFEST.in 0000664 0000000 0000000 00000000324 14562440725 0012624 0 ustar 00root root 0000000 0000000 include LICENSE.txt
# Don't recursively include everything that's in test; there may be cruft files,
# left-overs from experiments etc.
include tests/*.py
# include tests/meshes/
recursive-include tests/meshes *
README.md 0000664 0000000 0000000 00000021634 14562440725 0012354 0 ustar 00root root 0000000 0000000
I/O for mesh files.
[](https://pypi.org/project/meshio/)
[](https://anaconda.org/conda-forge/meshio/)
[](https://repology.org/project/python:meshio/versions)
[](https://pypi.org/project/meshio/)
[](https://doi.org/10.5281/zenodo.1173115)
[](https://github.com/nschloe/meshio)
[](https://pepy.tech/project/meshio)
[](https://discord.gg/Z6DMsJh4Hr)
[](https://github.com/nschloe/meshio/actions?query=workflow%3Aci)
[](https://app.codecov.io/gh/nschloe/meshio)
[](https://lgtm.com/projects/g/nschloe/meshio)
[](https://github.com/psf/black)
There are various mesh formats available for representing unstructured meshes.
meshio can read and write all of the following and smoothly converts between them:
> [Abaqus](http://abaqus.software.polimi.it/v6.14/index.html) (`.inp`),
> ANSYS msh (`.msh`),
> [AVS-UCD](https://lanl.github.io/LaGriT/pages/docs/read_avs.html) (`.avs`),
> [CGNS](https://cgns.github.io/) (`.cgns`),
> [DOLFIN XML](https://manpages.ubuntu.com/manpages/jammy/en/man1/dolfin-convert.1.html) (`.xml`),
> [Exodus](https://nschloe.github.io/meshio/exodus.pdf) (`.e`, `.exo`),
> [FLAC3D](https://www.itascacg.com/software/flac3d) (`.f3grid`),
> [H5M](https://www.mcs.anl.gov/~fathom/moab-docs/h5mmain.html) (`.h5m`),
> [Kratos/MDPA](https://github.com/KratosMultiphysics/Kratos/wiki/Input-data) (`.mdpa`),
> [Medit](https://people.sc.fsu.edu/~jburkardt/data/medit/medit.html) (`.mesh`, `.meshb`),
> [MED/Salome](https://docs.salome-platform.org/latest/dev/MEDCoupling/developer/med-file.html) (`.med`),
> [Nastran](https://help.autodesk.com/view/NSTRN/2019/ENU/?guid=GUID-42B54ACB-FBE3-47CA-B8FE-475E7AD91A00) (bulk data, `.bdf`, `.fem`, `.nas`),
> [Netgen](https://github.com/ngsolve/netgen) (`.vol`, `.vol.gz`),
> [Neuroglancer precomputed format](https://github.com/google/neuroglancer/tree/master/src/neuroglancer/datasource/precomputed#mesh-representation-of-segmented-object-surfaces),
> [Gmsh](https://gmsh.info/doc/texinfo/gmsh.html#File-formats) (format versions 2.2, 4.0, and 4.1, `.msh`),
> [OBJ](https://en.wikipedia.org/wiki/Wavefront_.obj_file) (`.obj`),
> [OFF](https://segeval.cs.princeton.edu/public/off_format.html) (`.off`),
> [PERMAS](https://www.intes.de) (`.post`, `.post.gz`, `.dato`, `.dato.gz`),
> [PLY]() (`.ply`),
> [STL]() (`.stl`),
> [Tecplot .dat](http://paulbourke.net/dataformats/tp/),
> [TetGen .node/.ele](https://wias-berlin.de/software/tetgen/fformats.html),
> [SVG](https://www.w3.org/TR/SVG/) (2D output only) (`.svg`),
> [SU2](https://su2code.github.io/docs_v7/Mesh-File/) (`.su2`),
> [UGRID](https://www.simcenter.msstate.edu/software/documentation/ug_io/3d_grid_file_type_ugrid.html) (`.ugrid`),
> [VTK](https://vtk.org/wp-content/uploads/2015/04/file-formats.pdf) (`.vtk`),
> [VTU](https://vtk.org/Wiki/VTK_XML_Formats) (`.vtu`),
> [WKT](https://en.wikipedia.org/wiki/Well-known_text_representation_of_geometry) ([TIN](https://en.wikipedia.org/wiki/Triangulated_irregular_network)) (`.wkt`),
> [XDMF](https://xdmf.org/index.php/XDMF_Model_and_Format) (`.xdmf`, `.xmf`).
([Here's a little survey](https://forms.gle/PSeNb3N3gv3wbEus8) on which formats are actually
used.)
Install with one of
```
pip install meshio[all]
conda install -c conda-forge meshio
```
(`[all]` pulls in all optional dependencies. By default, meshio only uses numpy.)
You can then use the command-line tool
```sh
meshio convert input.msh output.vtk # convert between two formats
meshio info input.xdmf # show some info about the mesh
meshio compress input.vtu # compress the mesh file
meshio decompress input.vtu # decompress the mesh file
meshio binary input.msh # convert to binary format
meshio ascii input.msh # convert to ASCII format
```
with any of the supported formats.
In Python, simply do
```python
import meshio
mesh = meshio.read(
filename, # string, os.PathLike, or a buffer/open file
# file_format="stl", # optional if filename is a path; inferred from extension
# see meshio-convert -h for all possible formats
)
# mesh.points, mesh.cells, mesh.cells_dict, ...
# mesh.vtk.read() is also possible
```
to read a mesh. To write, do
```python
import meshio
# two triangles and one quad
points = [
[0.0, 0.0],
[1.0, 0.0],
[0.0, 1.0],
[1.0, 1.0],
[2.0, 0.0],
[2.0, 1.0],
]
cells = [
("triangle", [[0, 1, 2], [1, 3, 2]]),
("quad", [[1, 4, 5, 3]]),
]
mesh = meshio.Mesh(
points,
cells,
# Optionally provide extra data on points, cells, etc.
point_data={"T": [0.3, -1.2, 0.5, 0.7, 0.0, -3.0]},
# Each item in cell data must match the cells array
cell_data={"a": [[0.1, 0.2], [0.4]]},
)
mesh.write(
"foo.vtk", # str, os.PathLike, or buffer/open file
# file_format="vtk", # optional if first argument is a path; inferred from extension
)
# Alternative with the same options
meshio.write_points_cells("foo.vtk", points, cells)
```
For both input and output, you can optionally specify the exact `file_format`
(in case you would like to enforce ASCII over binary VTK, for example).
#### Time series
The [XDMF format](https://xdmf.org/index.php/XDMF_Model_and_Format) supports
time series with a shared mesh. You can write times series data using meshio
with
```python
with meshio.xdmf.TimeSeriesWriter(filename) as writer:
writer.write_points_cells(points, cells)
for t in [0.0, 0.1, 0.21]:
writer.write_data(t, point_data={"phi": data})
```
and read it with
```python
with meshio.xdmf.TimeSeriesReader(filename) as reader:
points, cells = reader.read_points_cells()
for k in range(reader.num_steps):
t, point_data, cell_data = reader.read_data(k)
```
### ParaView plugin
*A Gmsh file opened with ParaView.*
If you have downloaded a binary version of ParaView, you may proceed as follows.
- Install meshio for the Python major version that ParaView uses (check `pvpython --version`)
- Open ParaView
- Find the file `paraview-meshio-plugin.py` of your meshio installation (on Linux:
`~/.local/share/paraview-5.9/plugins/`) and load it under _Tools / Manage Plugins / Load New_
- _Optional:_ Activate _Auto Load_
You can now open all meshio-supported files in ParaView.
### Performance comparison
The comparisons here are for a triangular mesh with about 900k points and 1.8M
triangles. The red lines mark the size of the mesh in memory.
#### File sizes
#### I/O speed
#### Maximum memory usage
### Installation
meshio is [available from the Python Package Index](https://pypi.org/project/meshio/),
so simply run
```
pip install meshio
```
to install.
Additional dependencies (`netcdf4`, `h5py`) are required for some of the output formats
and can be pulled in by
```
pip install meshio[all]
```
You can also install meshio from [Anaconda](https://anaconda.org/conda-forge/meshio):
```
conda install -c conda-forge meshio
```
### Testing
To run the meshio unit tests, check out this repository and type
```
tox
```
### License
meshio is published under the [MIT license](https://en.wikipedia.org/wiki/MIT_License).
doc/ 0000775 0000000 0000000 00000000000 14562440725 0011634 5 ustar 00root root 0000000 0000000 doc/cell_types.tex 0000664 0000000 0000000 00000027776 14562440725 0014544 0 ustar 00root root 0000000 0000000 \documentclass[convert=pdf2svg]{standalone}
% \documentclass{article}
\usepackage[T1]{fontenc}
\usepackage{lmodern}
\renewcommand{\familydefault}{\sfdefault}
\usepackage{tikz}
\usepackage{tikz-3dplot}
\usetikzlibrary{external}
\tikzset{external/force remake}
\tikzset{external/disable dependency files}
\tikzset{external/aux in dpth={false}}
% uncomment this to generate a figure for each cell type (and change documentclass to article)
% \tikzexternalize
\tikzstyle{vertex} = [circle,draw=black,fill=black,scale = 0.5]
\tdplotsetmaincoords{70}{110}
% cnode(tag,x,y,z,label,label_pos)
\def\cnode(#1,#2,#3,#4,#5,#6){
\node (#1) at (#2,#3,#4) [vertex,label=#6:$\mathsf{#5}$] {};
}
\pagestyle{empty}
\begin{document}
\begin{tabular}{ccc}
vertex &
line &
line3
\\
\tikzsetnextfilename{vertex}
\begin{tikzpicture}[scale = 2]
% \useasboundingbox (-2.5,-2.5) (2.5,2.5);
\cnode(n0,0,0,0,0,below right);
\end{tikzpicture}
&
\tikzsetnextfilename{line}
\begin{tikzpicture}[scale = 2]
\cnode(n0,0,0,0,0,below right);
\cnode(n1,2,0,0,1,below right);
\draw (n0) -- (n1);
\end{tikzpicture}
&
\tikzsetnextfilename{line3}
\begin{tikzpicture}[scale = 2]
\cnode(n0,0,0,0,0,below right);
\cnode(n1,2,0,0,1,below right);
\cnode(n2,1,0,0,2,below right);
\draw (n0) -- (n2) -- (n1);
\end{tikzpicture}
\\[1 em]
triangle &
triangle6 &
triangle7
\\
\tikzsetnextfilename{triangle}
\begin{tikzpicture}[scale = 2]
\cnode(n0,0,0,0,0,below right);
\cnode(n1,2,0,0,1,below right);
\cnode(n2,0,2,0,2,right);
\draw (n0) -- (n1) -- (n2) -- (n0);
\end{tikzpicture}
&
\tikzsetnextfilename{triangle6}
\begin{tikzpicture}[scale = 2]
\cnode(n0,0,0,0,0,below right);
\cnode(n1,2,0,0,1,below right);
\cnode(n2,0,2,0,2,right);
\cnode(n3,1,0,0,3,below right);
\cnode(n4,1,1,0,4,right);
\cnode(n5,0,1,0,5,below right);
\draw (n0) -- (n3) -- (n1) -- (n4) -- (n2) -- (n5) -- (n0);
\end{tikzpicture}
&
\tikzsetnextfilename{triangle7}
\begin{tikzpicture}[scale = 2]
\cnode(n0,0,0,0,0,below right);
\cnode(n1,2,0,0,1,below right);
\cnode(n2,0,2,0,2,right);
\cnode(n3,1,0,0,3,below right);
\cnode(n4,1,1,0,4,right);
\cnode(n5,0,1,0,5,below right);
\cnode(n6,0.5,0.5,0,6,below right);
\draw (n0) -- (n3) -- (n1) -- (n4) -- (n2) -- (n5) -- (n0);
\end{tikzpicture}
\\[1 em]
quad &
quad8 &
quad9
\\
\tikzsetnextfilename{quad}
\begin{tikzpicture}[scale = 2]
\cnode(n0,0,0,0,0,below right);
\cnode(n1,2,0,0,1,below right);
\cnode(n2,2,2,0,2,below right);
\cnode(n3,0,2,0,3,below right);
\draw (n0) -- (n1) -- (n2) -- (n3) -- (n0);
\end{tikzpicture}
&
\tikzsetnextfilename{quad8}
\begin{tikzpicture}[scale = 2]
\cnode(n0,0,0,0,0,below right);
\cnode(n1,2,0,0,1,below right);
\cnode(n2,2,2,0,2,below right);
\cnode(n3,0,2,0,3,below right);
\cnode(n4,1,0,0,4,below right);
\cnode(n5,2,1,0,5,below right);
\cnode(n6,1,2,0,6,below right);
\cnode(n7,0,1,0,7,below right);
\draw (n0) -- (n4) -- (n1) -- (n5) -- (n2) -- (n6) -- (n3) -- (n7) -- (n0);
\end{tikzpicture}
&
\tikzsetnextfilename{quad9}
\begin{tikzpicture}[scale = 2]
\cnode(n0,0,0,0,0,below right);
\cnode(n1,2,0,0,1,below right);
\cnode(n2,2,2,0,2,below right);
\cnode(n3,0,2,0,3,below right);
\cnode(n4,1,0,0,4,below right);
\cnode(n5,2,1,0,5,below right);
\cnode(n6,1,2,0,6,below right);
\cnode(n7,0,1,0,7,below right);
\cnode(n8,1,1,0,8,below right);
\draw (n0) -- (n4) -- (n1) -- (n5) -- (n2) -- (n6) -- (n3) -- (n7) -- (n0);
\end{tikzpicture}
\\[1 em]
tetra &
tetra10 &
hexahedron
\\
\tikzsetnextfilename{tetra}
\begin{tikzpicture}[scale = 2, tdplot_main_coords]
\cnode(n0,0,0,0,0,below right);
\cnode(n1,2,0,0,1,below right);
\cnode(n2,0,2,0,2,below right);
\cnode(n3,0,0,2,3,right);
\draw (n0) -- (n1) -- (n2) -- (n0);
\draw (n0) -- (n3);
\draw (n1) -- (n3);
\draw (n2) -- (n3);
\end{tikzpicture}
&
\tikzsetnextfilename{tetra10} % VTK
\begin{tikzpicture}[scale = 2, tdplot_main_coords]
\cnode(n0,0,0,0,0,below right);
\cnode(n1,2,0,0,1,below right);
\cnode(n2,0,2,0,2,below right);
\cnode(n3,0,0,2,3,right);
\cnode(n4,1,0,0,4,below right);
\cnode(n5,1,1,0,5,below right);
\cnode(n6,0,1,0,6,below right);
\cnode(n7,0,0,1,7,below right);
\cnode(n8,1,0,1,8,below right);
\cnode(n9,0,1,1,9,right);
\draw (n0) -- (n4) -- (n1) -- (n5) -- (n2) -- (n6) -- (n0);
\draw (n0) -- (n7) -- (n3);
\draw (n1) -- (n8) -- (n3);
\draw (n2) -- (n9) -- (n3);
\end{tikzpicture}
&
\tikzsetnextfilename{hexahedron}
\begin{tikzpicture}[scale = 2, tdplot_main_coords]
\cnode(n0,0,0,0,0,below right);
\cnode(n1,2,0,0,1,below right);
\cnode(n2,2,2,0,2,below right);
\cnode(n3,0,2,0,3,below right);
\cnode(n4,0,0,2,4,below right);
\cnode(n5,2,0,2,5,below right);
\cnode(n6,2,2,2,6,below right);
\cnode(n7,0,2,2,7,below right);
\draw (n0) -- (n1) -- (n2) -- (n3) -- (n0);
\draw (n4) -- (n5) -- (n6) -- (n7) -- (n4);
\draw (n0) -- (n4);
\draw (n1) -- (n5);
\draw (n2) -- (n6);
\draw (n3) -- (n7);
\end{tikzpicture}
\\[1 em]
hexahedron20 &
hexahedron24 &
hexahedron27
\\
\tikzsetnextfilename{hexahedron20} % VTK != gmsh
\begin{tikzpicture}[scale = 2, tdplot_main_coords]
\cnode(n0,0,0,0,0,below right);
\cnode(n1,2,0,0,1,below right);
\cnode(n2,2,2,0,2,below right);
\cnode(n3,0,2,0,3,below right);
\cnode(n4,0,0,2,4,below right);
\cnode(n5,2,0,2,5,below right);
\cnode(n6,2,2,2,6,below right);
\cnode(n7,0,2,2,7,below right);
\cnode(n8,1,0,0,8,below right);
\cnode(n9,2,1,0,9,below right);
\cnode(n10,1,2,0,10,below right);
\cnode(n11,0,1,0,11,below right);
\cnode(n12,1,0,2,12,below right);
\cnode(n13,2,1,2,13,below right);
\cnode(n14,1,2,2,14,below right);
\cnode(n15,0,1,2,15,below right);
\cnode(n16,0,0,1,16,below right);
\cnode(n17,2,0,1,17,below right);
\cnode(n18,2,2,1,18,below right);
\cnode(n19,0,2,1,19,below right);
\draw (n0) -- (n8) -- (n1) -- (n9) -- (n2) -- (n10) -- (n3) -- (n11) -- (n0);
\draw (n4) -- (n12) -- (n5) -- (n13) -- (n6) -- (n14) -- (n7) -- (n15) -- (n4);
\draw (n0) -- (n16) -- (n4);
\draw (n1) -- (n17) -- (n5);
\draw (n2) -- (n18) -- (n6);
\draw (n3) -- (n19) -- (n7);
\end{tikzpicture}
&
\tikzsetnextfilename{hexahedron24} % VTK
\begin{tikzpicture}[scale = 2, tdplot_main_coords]
\cnode(n0,0,0,0,0,below right);
\cnode(n1,2,0,0,1,below right);
\cnode(n2,2,2,0,2,below right);
\cnode(n3,0,2,0,3,below right);
\cnode(n4,0,0,2,4,below right);
\cnode(n5,2,0,2,5,below right);
\cnode(n6,2,2,2,6,below right);
\cnode(n7,0,2,2,7,below right);
\cnode(n8,1,0,0,8,below right);
\cnode(n9,2,1,0,9,below right);
\cnode(n10,1,2,0,10,below right);
\cnode(n11,0,1,0,11,below right);
\cnode(n12,1,0,2,12,below right);
\cnode(n13,2,1,2,13,below right);
\cnode(n14,1,2,2,14,below right);
\cnode(n15,0,1,2,15,below right);
\cnode(n16,0,0,1,16,below right);
\cnode(n17,2,0,1,17,below right);
\cnode(n18,2,2,1,18,below right);
\cnode(n19,0,2,1,19,below right);
\cnode(n20,0,1,1,20,below right);
\cnode(n21,2,1,1,21,below right);
\cnode(n22,1,0,1,22,below right);
\cnode(n23,1,2,1,23,below right);
\draw (n0) -- (n8) -- (n1) -- (n9) -- (n2) -- (n10) -- (n3) -- (n11) -- (n0);
\draw (n4) -- (n12) -- (n5) -- (n13) -- (n6) -- (n14) -- (n7) -- (n15) -- (n4);
\draw (n0) -- (n16) -- (n4);
\draw (n1) -- (n17) -- (n5);
\draw (n2) -- (n18) -- (n6);
\draw (n3) -- (n19) -- (n7);
\end{tikzpicture}
&
\tikzsetnextfilename{hexahedron27} % VTK != gmsh
\begin{tikzpicture}[scale = 2, tdplot_main_coords]
\cnode(n0,0,0,0,0,below right);
\cnode(n1,2,0,0,1,below right);
\cnode(n2,2,2,0,2,below right);
\cnode(n3,0,2,0,3,below right);
\cnode(n4,0,0,2,4,below right);
\cnode(n5,2,0,2,5,below right);
\cnode(n6,2,2,2,6,below right);
\cnode(n7,0,2,2,7,below right);
\cnode(n8,1,0,0,8,below right);
\cnode(n9,2,1,0,9,below right);
\cnode(n10,1,2,0,10,below right);
\cnode(n11,0,1,0,11,below right);
\cnode(n12,1,0,2,12,below right);
\cnode(n13,2,1,2,13,below right);
\cnode(n14,1,2,2,14,below right);
\cnode(n15,0,1,2,15,below right);
\cnode(n16,0,0,1,16,below right);
\cnode(n17,2,0,1,17,below right);
\cnode(n18,2,2,1,18,below right);
\cnode(n19,0,2,1,19,below right);
\cnode(n20,0,1,1,20,below right);
\cnode(n21,2,1,1,21,below right);
\cnode(n22,1,0,1,22,below right);
\cnode(n23,1,2,1,23,below right);
\cnode(n24,1,1,0,24,below right);
\cnode(n25,1,1,2,25,below right);
\cnode(n26,1,1,1,26,below right);
\draw (n0) -- (n8) -- (n1) -- (n9) -- (n2) -- (n10) -- (n3) -- (n11) -- (n0);
\draw (n4) -- (n12) -- (n5) -- (n13) -- (n6) -- (n14) -- (n7) -- (n15) -- (n4);
\draw (n0) -- (n16) -- (n4);
\draw (n1) -- (n17) -- (n5);
\draw (n2) -- (n18) -- (n6);
\draw (n3) -- (n19) -- (n7);
\end{tikzpicture}
\\[1 em]
wedge &
wedge12 &
wedge15
\\
\tikzsetnextfilename{wedge} % gmsh != VTK
\begin{tikzpicture}[scale = 2, tdplot_main_coords]
\cnode(n0,0,0,0,0,below right);
\cnode(n1,2,0,0,1,below right);
\cnode(n2,0,2,0,2,below right);
\cnode(n3,0,0,2,3,below right);
\cnode(n4,2,0,2,4,below right);
\cnode(n5,0,2,2,5,below right);
\draw (n0) -- (n1) -- (n2) -- (n0);
\draw (n3) -- (n4) -- (n5) -- (n3);
\draw (n0) -- (n3);
\draw (n1) -- (n4);
\draw (n2) -- (n5);
\end{tikzpicture}
&
\tikzsetnextfilename{wedge12} % VTK
\begin{tikzpicture}[scale = 2, tdplot_main_coords]
\cnode(n0,0,0,0,0,below right);
\cnode(n1,2,0,0,1,below right);
\cnode(n2,0,2,0,2,below right);
\cnode(n3,0,0,2,3,below right);
\cnode(n4,2,0,2,4,below right);
\cnode(n5,0,2,2,5,below right);
\cnode(n6,1,0,0,6,below right);
\cnode(n7,1,1,0,7,below right);
\cnode(n8,0,1,0,8,below right);
\cnode(n9,1,0,2,9,below right);
\cnode(n10,1,1,2,10,below right);
\cnode(n11,0,1,2,11,below right);
\draw (n0) -- (n6) -- (n1) -- (n7) -- (n2) -- (n8) -- (n0);
\draw (n3) -- (n9) -- (n4) -- (n10) -- (n5) -- (n11) -- (n3);
\draw (n0) -- (n3);
\draw (n1) -- (n4);
\draw (n2) -- (n5);
\end{tikzpicture}
&
\tikzsetnextfilename{wedge15} % VTK != gmsh
\begin{tikzpicture}[scale = 2, tdplot_main_coords]
\cnode(n0,0,0,0,0,below right);
\cnode(n1,2,0,0,1,below right);
\cnode(n2,0,2,0,2,below right);
\cnode(n3,0,0,2,3,below right);
\cnode(n4,2,0,2,4,below right);
\cnode(n5,0,2,2,5,below right);
\cnode(n6,1,0,0,6,below right);
\cnode(n7,1,1,0,7,below right);
\cnode(n8,0,1,0,8,below right);
\cnode(n9,1,0,2,9,below right);
\cnode(n10,1,1,2,10,below right);
\cnode(n11,0,1,2,11,below right);
\cnode(n12,0,0,1,12,below right);
\cnode(n13,2,0,1,13,below right);
\cnode(n14,0,2,1,14,below right);
\draw (n0) -- (n6) -- (n1) -- (n7) -- (n2) -- (n8) -- (n0);
\draw (n3) -- (n9) -- (n4) -- (n10) -- (n5) -- (n11) -- (n3);
\draw (n0) -- (n12) -- (n3);
\draw (n1) -- (n13) -- (n4);
\draw (n2) -- (n14) -- (n5);
\end{tikzpicture}
\\[1 em]
pyramid &
pyramid13 &
pyramid14
\\
\tikzsetnextfilename{pyramid}
\begin{tikzpicture}[scale = 2, tdplot_main_coords]
\cnode(n0,0,0,0,0,below right);
\cnode(n1,2,0,0,1,below right);
\cnode(n2,2,2,0,2,below right);
\cnode(n3,0,2,0,3,below right);
\cnode(n4,1,1,2,4,right);
\draw (n0) -- (n1) -- (n2) -- (n3) -- (n0);
\draw (n0) -- (n4);
\draw (n1) -- (n4);
\draw (n2) -- (n4);
\draw (n3) -- (n4);
\end{tikzpicture}
&
\tikzsetnextfilename{pyramid13} % VTK != gmsh
\begin{tikzpicture}[scale = 2, tdplot_main_coords]
\cnode(n0,0,0,0,0,below right);
\cnode(n1,2,0,0,1,below right);
\cnode(n2,2,2,0,2,below right);
\cnode(n3,0,2,0,3,below right);
\cnode(n4,1,1,2,4,right);
\cnode(n5,1,0,0,5,below right);
\cnode(n6,2,1,0,6,below right);
\cnode(n7,1,2,0,7,below right);
\cnode(n8,0,1,0,8,below right);
\cnode(n9,0.5,0.5,1,9,below right);
\cnode(n10,1.5,0.5,1,10,below right);
\cnode(n11,1.5,1.5,1,11,below right);
\cnode(n12,0.5,1.5,1,12,right);
\draw (n0) -- (n5) -- (n1) -- (n6) -- (n2) -- (n7) -- (n3) -- (n8) -- (n0);
\draw (n0) -- (n9) -- (n4);
\draw (n1) -- (n10) -- (n4);
\draw (n2) -- (n11) -- (n4);
\draw (n3) -- (n12) -- (n4);
\end{tikzpicture}
&
\tikzsetnextfilename{pyramid14} % gmsh
\begin{tikzpicture}[scale = 2, tdplot_main_coords]
\cnode(n0,0,0,0,0,below right);
\cnode(n1,2,0,0,1,below right);
\cnode(n2,2,2,0,2,below right);
\cnode(n3,0,2,0,3,below right);
\cnode(n4,1,1,2,4,right);
\cnode(n5,1,0,0,5,below right);
\cnode(n6,2,1,0,8,below right);
\cnode(n7,1,2,0,10,below right);
\cnode(n8,0,1,0,6,below right);
\cnode(n9,0.5,0.5,1,7,below right);
\cnode(n10,1.5,0.5,1,9,below right);
\cnode(n11,1.5,1.5,1,11,below right);
\cnode(n12,0.5,1.5,1,12,right);
\cnode(n13,1,1,0,13,below right);
\draw (n0) -- (n5) -- (n1) -- (n6) -- (n2) -- (n7) -- (n3) -- (n8) -- (n0);
\draw (n0) -- (n9) -- (n4);
\draw (n1) -- (n10) -- (n4);
\draw (n2) -- (n11) -- (n4);
\draw (n3) -- (n12) -- (n4);
\end{tikzpicture}
\end{tabular}
\end{document}
justfile 0000664 0000000 0000000 00000001426 14562440725 0012642 0 ustar 00root root 0000000 0000000 version := `python -c "import tomllib; print(tomllib.load(open('pyproject.toml', 'rb'))['project']['version'])"`
default:
@echo "\"just publish\"?"
tag:
@if [ "$(git rev-parse --abbrev-ref HEAD)" != "main" ]; then exit 1; fi
curl -H "Authorization: token `cat ~/.github-access-token`" -d '{"tag_name": "v{{version}}"}' https://api.github.com/repos/nschloe/meshio/releases
upload: clean
@if [ "$(git rev-parse --abbrev-ref HEAD)" != "main" ]; then exit 1; fi
# https://stackoverflow.com/a/58756491/353337
python3 -m build --sdist --wheel .
twine upload dist/*
publish: tag upload
clean:
@find . | grep -E "(__pycache__|\.pyc|\.pyo$)" | xargs rm -rf
@rm -rf src/*.egg-info/ build/ dist/ .tox/
format:
isort .
black .
blacken-docs README.md
lint:
black --check .
flake8 .
logo/ 0000775 0000000 0000000 00000000000 14562440725 0012027 5 ustar 00root root 0000000 0000000 logo/logo.py 0000664 0000000 0000000 00000005161 14562440725 0013344 0 ustar 00root root 0000000 0000000 import numpy as np
import optimesh
import pygmsh
import meshio
# def _old_logo()
# with pygmsh.occ.Geometry() as geom:
# characteristic_length_min = 0.5
# characteristic_length_max = 0.5
#
# container = geom.add_rectangle([0.0, 0.0, 0.0], 10.0, 10.0)
#
# letter_i = geom.add_rectangle([2.0, 2.0, 0.0], 1.0, 4.5)
# i_dot = geom.add_disk([2.5, 7.5, 0.0], 0.6)
#
# disk1 = geom.add_disk([6.25, 4.5, 0.0], 2.5)
# disk2 = geom.add_disk([6.25, 4.5, 0.0], 1.5)
# letter_o = geom.boolean_difference([disk1], [disk2])
#
# geom.boolean_difference([container], [letter_i, i_dot, letter_o])
#
# mesh = pygmsh.generate_mesh(geom)
#
# X, cells = mesh.points, mesh.cells
# X, cells = optimesh.cvt.lloyd.quasi_newton_uniform_lloyd(
# X, cells["triangle"], 1.0e-3, 1000
# )
# return X, cells
def create_logo2(y=0.0):
    """Generate the meshio logo triangulation.

    A unit square is meshed with two arrow-shaped holes, shifted vertically
    by ``y`` (first arrow down, second arrow up), and the triangulation is
    smoothed with optimesh's CVT smoother.

    Returns the point coordinates and the triangle connectivity.
    """
    lc = 0.15  # characteristic mesh size

    # polygonal outline of the first arrow (z = 0 throughout)
    arrow1_points = [
        [0.10, 0.70 - y, 0.0],
        [0.35, 0.60 - y, 0.0],
        [0.35, 0.65 - y, 0.0],
        [0.80, 0.65 - y, 0.0],
        [0.80, 0.75 - y, 0.0],
        [0.35, 0.75 - y, 0.0],
        [0.35, 0.80 - y, 0.0],
    ]
    # polygonal outline of the second arrow
    arrow2_points = [
        [0.90, 0.30 + y, 0.0],
        [0.65, 0.40 + y, 0.0],
        [0.65, 0.35 + y, 0.0],
        [0.20, 0.35 + y, 0.0],
        [0.20, 0.25 + y, 0.0],
        [0.65, 0.25 + y, 0.0],
        [0.65, 0.20 + y, 0.0],
    ]
    square = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0]]

    with pygmsh.geo.Geometry() as geom:
        hole1 = geom.add_polygon(arrow1_points, mesh_size=lc, make_surface=False)
        hole2 = geom.add_polygon(arrow2_points, mesh_size=lc, make_surface=False)
        geom.add_polygon(square, mesh_size=lc, holes=[hole1, hole2])
        mesh = geom.generate_mesh()

    pts = mesh.points
    tri = mesh.get_cells_type("triangle")
    # np.bincount (used inside optimesh) doesn't work with uint
    tri = tri.astype(int)

    pts, tri = optimesh.cvt.quasi_newton_uniform_full(
        pts, tri, 1.0e-10, 100, verbose=True
    )
    return pts, tri
if __name__ == "__main__":
    X, cells = create_logo2(y=0.08)
    # 2D SVG rendering of the logo
    mesh = meshio.Mesh(X, {"triangle": cells})
    meshio.svg.write("logo.svg", mesh, image_width=300)
    # Pad with a zero z-coordinate for the VTK output.
    # NOTE(review): assumes the points returned above are 2D here — confirm.
    X = np.column_stack([X, np.zeros_like(X[:, 0])])
    meshio.Mesh(X, {"triangle": cells}).write("logo.vtk")
pyproject.toml 0000664 0000000 0000000 00000003054 14562440725 0014005 0 ustar 00root root 0000000 0000000 [build-system]
requires = ["setuptools>=42", "wheel"]
build-backend = "setuptools.build_meta"
[project]
name = "meshio"
version = "5.3.5"
description = "I/O for many mesh formats"
readme = "README.md"
requires-python = ">=3.8"
license = {file = "LICENSE.txt"}
keywords = [
"mesh",
"file formats",
"scientific",
"engineering",
"fem",
"finite elements"
]
authors = [
{email = "nico.schloemer@gmail.com"},
{name = "Nico Schlömer"}
]
classifiers = [
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Topic :: Scientific/Engineering",
"Topic :: Utilities",
]
dependencies = [
"importlib_metadata; python_version<'3.8'",
"numpy>=1.20.0",
"rich",
]
[project.optional-dependencies]
all = [
"netCDF4",
"h5py" # CGNS, H5M, MED, XDMF formats
]
[project.urls]
homepage = "https://github.com/nschloe/meshio"
code = "https://github.com/nschloe/meshio"
issues = "https://github.com/nschloe/meshio/issues"
[project.entry-points.console_scripts]
meshio = "meshio._cli:main"
[tool.isort]
profile = "black"
# [options.data_files]
# share/paraview-5.9/plugins =
# tools/paraview-meshio-plugin.py
src/ 0000775 0000000 0000000 00000000000 14562440725 0011656 5 ustar 00root root 0000000 0000000 src/meshio/ 0000775 0000000 0000000 00000000000 14562440725 0013142 5 ustar 00root root 0000000 0000000 src/meshio/__about__.py 0000664 0000000 0000000 00000000436 14562440725 0015425 0 ustar 00root root 0000000 0000000 try:
# Python 3.8+
from importlib import metadata
except ImportError:
try:
import importlib_metadata as metadata
except ImportError:
__version__ = "unknown"
try:
__version__ = metadata.version("meshio")
except Exception:
__version__ = "unknown"
src/meshio/__init__.py 0000664 0000000 0000000 00000002352 14562440725 0015255 0 ustar 00root root 0000000 0000000 from . import (
_cli,
abaqus,
ansys,
avsucd,
cgns,
dolfin,
exodus,
flac3d,
gmsh,
h5m,
hmf,
mdpa,
med,
medit,
nastran,
netgen,
neuroglancer,
obj,
off,
permas,
ply,
stl,
su2,
svg,
tecplot,
tetgen,
ugrid,
vtk,
vtu,
wkt,
xdmf,
)
from .__about__ import __version__
from ._exceptions import ReadError, WriteError
from ._helpers import (
deregister_format,
extension_to_filetypes,
read,
register_format,
write,
write_points_cells,
)
# BUG FIX: `topological_dimension` is listed in `__all__` below but was never
# imported, so `from meshio import *` raised an AttributeError for it.
from ._mesh import CellBlock, Mesh, topological_dimension

# Public API of the meshio package.
__all__ = [
    "abaqus",
    "ansys",
    "avsucd",
    "cgns",
    "dolfin",
    "exodus",
    "flac3d",
    "gmsh",
    "h5m",
    "hmf",
    "mdpa",
    "med",
    "medit",
    "nastran",
    "netgen",
    "neuroglancer",
    "obj",
    "off",
    "permas",
    "ply",
    "stl",
    "su2",
    "svg",
    "tecplot",
    "tetgen",
    "ugrid",
    "vtk",
    "vtu",
    "wkt",
    "xdmf",
    "_cli",
    "read",
    "write",
    "register_format",
    "deregister_format",
    "write_points_cells",
    "extension_to_filetypes",
    "Mesh",
    "CellBlock",
    "ReadError",
    "WriteError",
    "topological_dimension",
    "__version__",
]
src/meshio/_cli/ 0000775 0000000 0000000 00000000000 14562440725 0014050 5 ustar 00root root 0000000 0000000 src/meshio/_cli/__init__.py 0000664 0000000 0000000 00000000054 14562440725 0016160 0 ustar 00root root 0000000 0000000 from ._main import main
__all__ = ["main"]
src/meshio/_cli/_ascii.py 0000664 0000000 0000000 00000003515 14562440725 0015655 0 ustar 00root root 0000000 0000000 import os
import pathlib
from .. import ansys, flac3d, gmsh, mdpa, ply, stl, vtk, vtu, xdmf
from .._common import error
from .._helpers import _filetypes_from_path, read, reader_map
def add_args(parser):
    """Register the CLI arguments of the `ascii` subcommand."""
    parser.add_argument("infile", type=str, help="mesh file to convert")
    parser.add_argument(
        "-i",
        "--input-format",
        type=str,
        default=None,
        choices=sorted(reader_map.keys()),
        help="input file format",
    )
def ascii(args):
    """Rewrite `args.infile` in place using the ASCII variant of its format.

    Returns 0 on success, 1 if the format has no ASCII variant.
    (The name shadows the builtin `ascii`; kept for CLI wiring.)
    """
    if args.input_format:
        fmts = [args.input_format]
    else:
        fmts = _filetypes_from_path(pathlib.Path(args.infile))
    fmt = fmts[0]  # first candidate wins

    size = os.stat(args.infile).st_size
    print(f"File size before: {size / 1024 ** 2:.2f} MB")
    mesh = read(args.infile, file_format=args.input_format)

    # format name -> ASCII writer invocation
    writers = {
        "ansys": lambda: ansys.write(args.infile, mesh, binary=False),
        "flac3d": lambda: flac3d.write(args.infile, mesh, binary=False),
        "gmsh": lambda: gmsh.write(args.infile, mesh, binary=False),
        "mdpa": lambda: mdpa.write(args.infile, mesh, binary=False),
        "ply": lambda: ply.write(args.infile, mesh, binary=False),
        "stl": lambda: stl.write(args.infile, mesh, binary=False),
        "vtk": lambda: vtk.write(args.infile, mesh, binary=False),
        "vtu": lambda: vtu.write(args.infile, mesh, binary=False),
        "xdmf": lambda: xdmf.write(args.infile, mesh, data_format="XML"),
    }
    writer = writers.get(fmt)
    if writer is None:
        error(f"Don't know how to convert {args.infile} to ASCII format.")
        return 1
    writer()

    size = os.stat(args.infile).st_size
    print(f"File size after: {size / 1024 ** 2:.2f} MB")
    return 0
src/meshio/_cli/_binary.py 0000664 0000000 0000000 00000003435 14562440725 0016052 0 ustar 00root root 0000000 0000000 import os
import pathlib
from .. import ansys, flac3d, gmsh, mdpa, ply, stl, vtk, vtu, xdmf
from .._helpers import _filetypes_from_path, read, reader_map
def add_args(parser):
    """Register the CLI arguments of the `binary` subcommand."""
    parser.add_argument("infile", type=str, help="mesh file to convert")
    parser.add_argument(
        "--input-format",
        "-i",
        type=str,
        choices=sorted(list(reader_map.keys())),
        help="input file format",
        default=None,
    )
def binary(args):
    """Rewrite `args.infile` in place using the binary variant of its format.

    Returns 0 on success, 1 if the format has no binary variant.
    """
    # Imported locally to keep the module-level imports unchanged; used for
    # consistency with the sibling subcommands (ascii/compress/decompress),
    # which report unknown formats via `error` instead of a bare print+exit.
    from .._common import error

    if args.input_format:
        fmts = [args.input_format]
    else:
        fmts = _filetypes_from_path(pathlib.Path(args.infile))
    # pick the first
    fmt = fmts[0]

    size = os.stat(args.infile).st_size
    print(f"File size before: {size / 1024 ** 2:.2f} MB")
    mesh = read(args.infile, file_format=args.input_format)

    # write it out in the binary variant
    if fmt == "ansys":
        ansys.write(args.infile, mesh, binary=True)
    elif fmt == "flac3d":
        flac3d.write(args.infile, mesh, binary=True)
    elif fmt == "gmsh":
        gmsh.write(args.infile, mesh, binary=True)
    elif fmt == "mdpa":
        mdpa.write(args.infile, mesh, binary=True)
    elif fmt == "ply":
        ply.write(args.infile, mesh, binary=True)
    elif fmt == "stl":
        stl.write(args.infile, mesh, binary=True)
    elif fmt == "vtk":
        vtk.write(args.infile, mesh, binary=True)
    elif fmt == "vtu":
        vtu.write(args.infile, mesh, binary=True)
    elif fmt == "xdmf":
        xdmf.write(args.infile, mesh, data_format="HDF")
    else:
        # BUG FIX (consistency): previously print() + exit(1); return an
        # error code like the `ascii` subcommand so `main()` propagates it.
        error(f"Don't know how to convert {args.infile} to binary format.")
        return 1

    size = os.stat(args.infile).st_size
    print(f"File size after: {size / 1024 ** 2:.2f} MB")
    return 0
src/meshio/_cli/_compress.py 0000664 0000000 0000000 00000004527 14562440725 0016424 0 ustar 00root root 0000000 0000000 import os
import pathlib
from .. import ansys, cgns, gmsh, h5m, mdpa, ply, stl, vtk, vtu, xdmf
from .._common import error
from .._helpers import _filetypes_from_path, read, reader_map
def add_args(parser):
    """Register the CLI arguments of the `compress` subcommand."""
    parser.add_argument("infile", type=str, help="mesh file to compress")
    parser.add_argument(
        "--input-format",
        "-i",
        type=str,
        choices=sorted(list(reader_map.keys())),
        help="input file format",
        default=None,
    )
    parser.add_argument(
        "--max",
        "-max",
        action="store_true",
        help="maximum compression",
        default=False,
    )
def compress(args):
    """Rewrite `args.infile` in place using a compressed variant of its format.

    Prints the file size before and after; exits with status 1 if the
    format has no compressed variant.  `--max` selects the strongest
    compression settings.
    """
    if args.input_format:
        fmts = [args.input_format]
    else:
        # deduce candidate formats from the file extension
        fmts = _filetypes_from_path(pathlib.Path(args.infile))
    # pick the first
    fmt = fmts[0]

    size = os.stat(args.infile).st_size
    print(f"File size before: {size / 1024 ** 2:.2f} MB")
    mesh = read(args.infile, file_format=args.input_format)

    # # Some converters (like VTK) require `points` to be contiguous.
    # mesh.points = np.ascontiguousarray(mesh.points)

    # write it out, choosing the compression knobs per format
    if fmt == "ansys":
        ansys.write(args.infile, mesh, binary=True)
    elif fmt == "cgns":
        cgns.write(
            args.infile, mesh, compression="gzip", compression_opts=9 if args.max else 4
        )
    elif fmt == "gmsh":
        gmsh.write(args.infile, mesh, binary=True)
    elif fmt == "h5m":
        h5m.write(
            args.infile, mesh, compression="gzip", compression_opts=9 if args.max else 4
        )
    elif fmt == "mdpa":
        mdpa.write(args.infile, mesh, binary=True)
    elif fmt == "ply":
        ply.write(args.infile, mesh, binary=True)
    elif fmt == "stl":
        stl.write(args.infile, mesh, binary=True)
    elif fmt == "vtk":
        vtk.write(args.infile, mesh, binary=True)
    elif fmt == "vtu":
        vtu.write(
            args.infile, mesh, binary=True, compression="lzma" if args.max else "zlib"
        )
    elif fmt == "xdmf":
        xdmf.write(
            args.infile,
            mesh,
            data_format="HDF",
            compression="gzip",
            compression_opts=9 if args.max else 4,
        )
    else:
        error(f"Don't know how to compress {args.infile}.")
        exit(1)

    size = os.stat(args.infile).st_size
    print(f"File size after: {size / 1024 ** 2:.2f} MB")
src/meshio/_cli/_convert.py 0000664 0000000 0000000 00000004175 14562440725 0016250 0 ustar 00root root 0000000 0000000 import numpy as np
from .._helpers import _writer_map, read, reader_map, write
def add_args(parser):
    """Register the CLI arguments of the `convert` subcommand."""
    parser.add_argument("infile", type=str, help="mesh file to be read from")
    parser.add_argument(
        "--input-format",
        "-i",
        type=str,
        choices=sorted(list(reader_map.keys())),
        help="input file format",
        default=None,
    )
    parser.add_argument(
        "--output-format",
        "-o",
        type=str,
        choices=sorted(list(_writer_map.keys())),
        help="output file format",
        default=None,
    )
    parser.add_argument(
        "--ascii",
        "-a",
        action="store_true",
        help="write in ASCII format variant (where applicable, default: binary)",
    )
    parser.add_argument("outfile", type=str, help="mesh file to be written to")
    parser.add_argument(
        "--float-format",
        "-f",
        type=str,
        help="float format used in output ASCII files (default: .16e)",
    )
    parser.add_argument(
        "--sets-to-int-data",
        "-s",
        action="store_true",
        help="if possible, convert sets to integer data (useful if the output type does not support sets)",
    )
    parser.add_argument(
        "--int-data-to-sets",
        "-d",
        action="store_true",
        help="if possible, convert integer data to sets (useful if the output type does not support integer data)",
    )
def convert(args):
    """Read `args.infile` and write it to `args.outfile`, possibly converting
    the format and the set/int-data representation on the way."""
    # read mesh data
    mesh = read(args.infile, file_format=args.input_format)

    # Some converters (like VTK) require `points` to be contiguous.
    mesh.points = np.ascontiguousarray(mesh.points)

    if args.sets_to_int_data:
        mesh.point_sets_to_data()
        mesh.cell_sets_to_data()

    if args.int_data_to_sets:
        # ROBUSTNESS FIX: iterate over snapshots of the keys. The
        # *_data_to_sets() calls may remove the converted entry from the
        # dict, and mutating a dict while iterating it raises RuntimeError.
        for key in list(mesh.point_data):
            mesh.point_data_to_sets(key)
        for key in list(mesh.cell_data):
            mesh.cell_data_to_sets(key)

    # write it out
    kwargs = {"file_format": args.output_format}
    if args.float_format is not None:
        kwargs["float_fmt"] = args.float_format
    if args.ascii:
        kwargs["binary"] = False

    write(args.outfile, mesh, **kwargs)
src/meshio/_cli/_decompress.py 0000664 0000000 0000000 00000002710 14562440725 0016725 0 ustar 00root root 0000000 0000000 import os
import pathlib
from .. import cgns, h5m, vtu, xdmf
from .._common import error
from .._helpers import _filetypes_from_path, read, reader_map
def add_args(parser):
    """Register the CLI arguments of the `decompress` subcommand."""
    parser.add_argument("infile", type=str, help="mesh file to decompress")
    parser.add_argument(
        "--input-format",
        "-i",
        type=str,
        choices=sorted(list(reader_map.keys())),
        help="input file format",
        default=None,
    )
def decompress(args):
    """Rewrite `args.infile` in place without compression.

    Prints the file size before and after; exits with status 1 if the
    format does not support (de)compression.
    """
    if args.input_format:
        fmts = [args.input_format]
    else:
        # deduce candidate formats from the file extension
        fmts = _filetypes_from_path(pathlib.Path(args.infile))
    # pick the first
    fmt = fmts[0]

    size = os.stat(args.infile).st_size
    print(f"File size before: {size / 1024 ** 2:.2f} MB")
    mesh = read(args.infile, file_format=args.input_format)

    # # Some converters (like VTK) require `points` to be contiguous.
    # mesh.points = np.ascontiguousarray(mesh.points)

    # write it out with compression disabled
    if fmt == "cgns":
        cgns.write(args.infile, mesh, compression=None)
    elif fmt == "h5m":
        h5m.write(args.infile, mesh, compression=None)
    elif fmt == "vtu":
        vtu.write(args.infile, mesh, binary=True, compression=None)
    elif fmt == "xdmf":
        xdmf.write(args.infile, mesh, data_format="HDF", compression=None)
    else:
        error(f"Don't know how to decompress {args.infile}.")
        exit(1)

    size = os.stat(args.infile).st_size
    print(f"File size after: {size / 1024 ** 2:.2f} MB")
src/meshio/_cli/_info.py 0000664 0000000 0000000 00000002130 14562440725 0015510 0 ustar 00root root 0000000 0000000 import numpy as np
from .._common import warn
from .._helpers import read, reader_map
def add_args(parser):
    """Register the CLI arguments of the `info` subcommand."""
    parser.add_argument("infile", type=str, help="mesh file to be read from")
    parser.add_argument(
        "--input-format",
        "-i",
        type=str,
        choices=sorted(list(reader_map.keys())),
        help="input file format",
        default=None,
    )
def info(args):
    """Print a summary of the mesh and warn about basic consistency problems."""
    # read mesh data
    mesh = read(args.infile, file_format=args.input_format)
    print(mesh)

    # check if the cell arrays are consistent with the points;
    # valid point indices are 0 ... num_points - 1
    is_consistent = True
    num_points = mesh.points.shape[0]
    for cells in mesh.cells:
        # BUG FIX: this was `>`, which missed the off-by-one case of an
        # index exactly equal to the number of points (also nonexistent).
        if np.any(cells.data >= num_points):
            warn("Inconsistent mesh. Cells refer to nonexistent points.")
            is_consistent = False
            break

    # check if there are redundant points
    if is_consistent:
        point_is_used = np.zeros(num_points, dtype=bool)
        for cells in mesh.cells:
            point_is_used[cells.data] = True
        if np.any(~point_is_used):
            warn("Some points are not part of any cell.")

    return 0
src/meshio/_cli/_main.py 0000664 0000000 0000000 00000003617 14562440725 0015514 0 ustar 00root root 0000000 0000000 import argparse
from sys import version_info
from ..__about__ import __version__
from . import _ascii, _binary, _compress, _convert, _decompress, _info
def main(argv=None):
    """Entry point of the `meshio` command line tool.

    Builds the argument parser with one subparser per subcommand and
    dispatches to the subcommand's handler; returns its exit code.
    """
    parent_parser = argparse.ArgumentParser(
        description="Mesh input/output tools.",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    parent_parser.add_argument(
        "--version",
        "-v",
        action="version",
        version=_get_version_text(),
        help="display version information",
    )

    subparsers = parent_parser.add_subparsers(
        title="subcommands", dest="command", required=True
    )

    parser = subparsers.add_parser("convert", help="Convert mesh files", aliases=["c"])
    _convert.add_args(parser)
    parser.set_defaults(func=_convert.convert)

    parser = subparsers.add_parser("info", help="Print mesh info", aliases=["i"])
    _info.add_args(parser)
    parser.set_defaults(func=_info.info)

    parser = subparsers.add_parser("compress", help="Compress mesh file")
    _compress.add_args(parser)
    parser.set_defaults(func=_compress.compress)

    parser = subparsers.add_parser("decompress", help="Decompress mesh file")
    _decompress.add_args(parser)
    parser.set_defaults(func=_decompress.decompress)

    parser = subparsers.add_parser("ascii", help="Convert to ASCII", aliases=["a"])
    _ascii.add_args(parser)
    parser.set_defaults(func=_ascii.ascii)

    parser = subparsers.add_parser("binary", help="Convert to binary", aliases=["b"])
    _binary.add_args(parser)
    parser.set_defaults(func=_binary.binary)

    args = parent_parser.parse_args(argv)
    # dispatch to the selected subcommand
    return args.func(args)
def _get_version_text():
    """Return the banner printed by `meshio --version`."""
    vi = version_info
    py = f"{vi.major}.{vi.minor}.{vi.micro}"
    return (
        f"meshio {__version__} [Python {py}]\n"
        "Copyright (c) 2015-2021 Nico Schlömer et al."
    )
src/meshio/_common.py 0000664 0000000 0000000 00000007453 14562440725 0015154 0 ustar 00root root 0000000 0000000 from __future__ import annotations
from xml.etree import ElementTree as ET
import numpy as np
from rich.console import Console
# See, e.g., the gmsh/VTK element documentation
# (<https://gmsh.info/doc/texinfo/gmsh.html#Node-ordering>) for the node
# ordering.
# Number of nodes per cell type.  For higher-order variants the numeric
# suffix of the name is the node count (e.g. "triangle6" has 6 nodes).
num_nodes_per_cell = {
    "vertex": 1,
    "line": 2,
    "triangle": 3,
    "quad": 4,
    "quad8": 8,
    "tetra": 4,
    "hexahedron": 8,
    "hexahedron20": 20,
    "hexahedron24": 24,
    "wedge": 6,
    "pyramid": 5,
    #
    "line3": 3,
    "triangle6": 6,
    "quad9": 9,
    "tetra10": 10,
    "hexahedron27": 27,
    "wedge15": 15,
    "wedge18": 18,
    "pyramid13": 13,
    "pyramid14": 14,
    #
    "line4": 4,
    "triangle10": 10,
    "quad16": 16,
    "tetra20": 20,
    "wedge40": 40,
    "hexahedron64": 64,
    #
    "line5": 5,
    "triangle15": 15,
    "quad25": 25,
    "tetra35": 35,
    "wedge75": 75,
    "hexahedron125": 125,
    #
    "line6": 6,
    "triangle21": 21,
    "quad36": 36,
    "tetra56": 56,
    "wedge126": 126,
    "hexahedron216": 216,
    #
    "line7": 7,
    "triangle28": 28,
    "quad49": 49,
    "tetra84": 84,
    "wedge196": 196,
    "hexahedron343": 343,
    #
    "line8": 8,
    "triangle36": 36,
    "quad64": 64,
    "tetra120": 120,
    "wedge288": 288,
    "hexahedron512": 512,
    #
    "line9": 9,
    "triangle45": 45,
    "quad81": 81,
    "tetra165": 165,
    "wedge405": 405,
    "hexahedron729": 729,
    #
    "line10": 10,
    "triangle55": 55,
    "quad100": 100,
    "tetra220": 220,
    "wedge550": 550,
    "hexahedron1000": 1000,
    "hexahedron1331": 1331,
    #
    "line11": 11,
    "triangle66": 66,
    "quad121": 121,
    "tetra286": 286,
}
def cell_data_from_raw(cells, cell_data_raw):
    """Split flat per-cell data arrays into one array per cell block."""
    block_ends = np.cumsum([len(block) for block in cells])[:-1]
    return {key: np.split(arr, block_ends) for key, arr in cell_data_raw.items()}
def raw_from_cell_data(cell_data):
    """Concatenate the per-block arrays of each cell-data field into one flat array."""
    out = {}
    for name, blocks in cell_data.items():
        out[name] = np.concatenate(blocks)
    return out
def write_xml(filename, root):
    """Serialize the element tree rooted at `root` to `filename`."""
    ET.ElementTree(root).write(filename)
def _pick_first_int_data(data):
# pick out material
keys = list(data.keys())
candidate_keys = []
for key in keys:
# works for point_data and cell_data
if data[key][0].dtype.kind in ["i", "u"]: # int or uint
candidate_keys.append(key)
if len(candidate_keys) > 0:
# pick the first
key = candidate_keys[0]
idx = keys.index(key)
other = keys[:idx] + keys[idx + 1 :]
else:
key = None
other = []
return key, other
def info(string, highlight: bool = True) -> None:
    """Print an informational message to stderr (via rich)."""
    console = Console(stderr=True)
    console.print(f"[bold]Info:[/bold] {string}", highlight=highlight)
def warn(string, highlight: bool = True) -> None:
    """Print a warning message to stderr (via rich)."""
    message = f"[yellow][bold]Warning:[/bold] {string}[/yellow]"
    Console(stderr=True).print(message, highlight=highlight)
def error(string, highlight: bool = True) -> None:
    """Print an error message to stderr (via rich)."""
    message = f"[red][bold]Error:[/bold] {string}[/red]"
    Console(stderr=True).print(message, highlight=highlight)
def is_in_any(string: str, strings: list[str]) -> bool:
    """True if `string` occurs as a substring of any element of `strings`."""
    return any(string in s for s in strings)
def join_strings(strings: list[str]) -> tuple[str, str]:
    """Join strings such that they can be uniquely split again afterwards.

    Picks the first separator character that occurs in none of the strings
    and returns the joined string together with that separator.
    """
    for char in ("-", "_", "#", "+", "/"):
        if all(char not in s for s in strings):
            return char.join(strings), char
    raise AssertionError("no suitable join character found")
def replace_space(string: str) -> tuple[str, str]:
    """Replace all spaces in `string` by a character not already present.

    Returns the new string and the replacement character.
    """
    for char in ("_", "-", "+", "X", "/", "#"):
        if char not in string:
            return string.replace(" ", char), char
    raise AssertionError("no suitable replacement character found")
src/meshio/_cxml/ 0000775 0000000 0000000 00000000000 14562440725 0014244 5 ustar 00root root 0000000 0000000 src/meshio/_cxml/__init__.py 0000664 0000000 0000000 00000000051 14562440725 0016351 0 ustar 00root root 0000000 0000000 from . import etree
__all__ = ["etree"]
src/meshio/_cxml/etree.py 0000664 0000000 0000000 00000003511 14562440725 0015722 0 ustar 00root root 0000000 0000000 # This XML writer is a drop-in replacement for LXML/Python XML Etree. It only offers one
# other member: self.text_write.
# The problem is that, for LXML, the entire etree has to be constructed in memory before
# writing it to a file. Many mesh formats that use XML have lots of int or float data
# written in the text fields. Converting this to ASCII first requires a lot of memory.
# This etree here allows the writing method to write to the file directly, without
# having to create a string representation first.
class Element:
    """Minimal streaming XML element (drop-in for an etree element).

    If `text_writer` is set, it is called with the open file object and may
    stream the text content directly, avoiding building it in memory first.
    """

    def __init__(self, name, **kwargs):
        self.name = name
        self.attrib = kwargs
        self._children = []
        self.text = None
        self.text_writer = None

    def insert(self, pos, elem):
        """Insert child element `elem` at position `pos`."""
        self._children.insert(pos, elem)

    def set(self, key, value):
        """Set attribute `key` to `value`."""
        self.attrib[key] = value

    def write(self, f):
        """Write this element and its subtree to the open file `f`."""
        kw_list = [f'{key}="{value}"' for key, value in self.attrib.items()]
        f.write("<{}>\n".format(" ".join([self.name] + kw_list)))
        if self.text:
            f.write(self.text)
            f.write("\n")
        if self.text_writer:
            self.text_writer(f)
            f.write("\n")
        for child in self._children:
            child.write(f)
        # BUG FIX: the closing tag was written as "name>" (missing "</"),
        # producing malformed XML.
        f.write(f"</{self.name}>\n")
class SubElement(Element):
    """An Element that registers itself as a child of `parent` on creation."""

    def __init__(self, parent, name, **kwargs):
        super().__init__(name, **kwargs)
        parent._children.append(self)
class Comment:
    """An XML comment node."""

    def __init__(self, text):
        self.text = text

    def write(self, f):
        # BUG FIX: the comment markup ("<!--", "-->") had been lost,
        # leaving only a bare newline.
        f.write(f"<!--{self.text}-->\n")
class ElementTree:
    """Container that writes a root Element to a file, with XML declaration."""

    def __init__(self, root):
        self.root = root

    def write(self, filename, xml_declaration=True):
        with open(filename, "w") as f:
            if xml_declaration:
                # BUG FIX: the declaration text had been lost, leaving only
                # a bare newline.
                f.write('<?xml version="1.0"?>\n')
            self.root.write(f)
src/meshio/_exceptions.py 0000664 0000000 0000000 00000000172 14562440725 0016034 0 ustar 00root root 0000000 0000000 class ReadError(Exception):
pass
class WriteError(Exception):
    """Raised when a mesh cannot be written (unknown format, bad cell data, ...)."""

    pass
class CorruptionError(Exception):
    """Raised when a file's contents are detected to be corrupt."""

    pass
src/meshio/_files.py 0000664 0000000 0000000 00000000555 14562440725 0014762 0 ustar 00root root 0000000 0000000 from contextlib import contextmanager
def is_buffer(obj, mode):
    """True if `obj` looks like an open buffer for the given mode ('r'/'w')."""
    if "r" in mode and hasattr(obj, "read"):
        return True
    return "w" in mode and hasattr(obj, "write")
@contextmanager
def open_file(path_or_buf, mode="r"):
    """Yield `path_or_buf` itself if it already is a buffer, else open the path."""
    if not is_buffer(path_or_buf, mode):
        with open(path_or_buf, mode) as handle:
            yield handle
    else:
        yield path_or_buf
src/meshio/_helpers.py 0000664 0000000 0000000 00000012737 14562440725 0015327 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import sys
from pathlib import Path
import numpy as np
from numpy.typing import ArrayLike
from ._common import error, num_nodes_per_cell
from ._exceptions import ReadError, WriteError
from ._files import is_buffer
from ._mesh import CellBlock, Mesh
# Registry: file extension (e.g. ".vtk") -> list of format names handling it.
extension_to_filetypes = {}
# Registries: format name -> reader callable / writer callable.
reader_map = {}
_writer_map = {}
def register_format(
    format_name: str, extensions: list[str], reader, writer_map
) -> None:
    """Register a file format: its extensions, optional reader, and writers."""
    for ext in extensions:
        extension_to_filetypes.setdefault(ext, []).append(format_name)
    if reader is not None:
        reader_map[format_name] = reader
    _writer_map.update(writer_map)
def deregister_format(format_name: str):
    """Remove `format_name` from all registries (no-op if unregistered)."""
    for filetypes in extension_to_filetypes.values():
        if format_name in filetypes:
            filetypes.remove(format_name)
    reader_map.pop(format_name, None)
    _writer_map.pop(format_name, None)
def _filetypes_from_path(path: Path) -> list[str]:
    """Deduce candidate file formats from the (possibly compound) extension.

    Compound suffixes are tried from the shortest to the longest, e.g. for
    "a.msh.gz" both ".gz" and ".msh.gz" are looked up.
    """
    matches = []
    ext = ""
    for suffix in reversed(path.suffixes):
        ext = (suffix + ext).lower()
        matches += extension_to_filetypes.get(ext, [])
    if not matches:
        raise ReadError(f"Could not deduce file format from path '{path}'.")
    return matches
def read(filename, file_format: str | None = None):
    """Read an unstructured mesh with added data.

    :param filename: path or open buffer to read from
    :param file_format: explicit format name; mandatory when `filename`
        is a buffer, otherwise deduced from the extension if omitted
    :returns: the mesh
    """
    if is_buffer(filename, "r"):
        return _read_buffer(filename, file_format)
    return _read_file(Path(filename), file_format)
def _read_buffer(filename, file_format: str | None):
    """Read a mesh from an open buffer; `file_format` is mandatory here."""
    if file_format is None:
        raise ReadError("File format must be given if buffer is used")
    if file_format == "tetgen":
        # tetgen needs sibling files on disk, which a buffer cannot provide
        raise ReadError(
            "tetgen format is spread across multiple files "
            "and so cannot be read from a buffer"
        )
    if file_format not in reader_map:
        raise ReadError(f"Unknown file format '{file_format}'")
    return reader_map[file_format](filename)
def _read_file(path: Path, file_format: str | None):
    """Read a mesh from a path, trying each candidate format in turn.

    Exits the process with status 1 when no reader succeeds.
    """
    if not path.exists():
        raise ReadError(f"File {path} not found.")

    if file_format:
        possible_file_formats = [file_format]
    else:
        # deduce possible file formats from extension
        possible_file_formats = _filetypes_from_path(path)

    for file_format in possible_file_formats:
        if file_format not in reader_map:
            raise ReadError(f"Unknown file format '{file_format}' of '{path}'.")

        try:
            return reader_map[file_format](str(path))
        except ReadError as e:
            # NOTE(review): goes to stdout; stderr might be more appropriate
            print(e)

    if len(possible_file_formats) == 1:
        msg = f"Couldn't read file {path} as {possible_file_formats[0]}"
    else:
        lst = ", ".join(possible_file_formats)
        msg = f"Couldn't read file {path} as either of {lst}"

    error(msg)
    sys.exit(1)
def write_points_cells(
    filename,
    points: ArrayLike,
    cells: dict[str, ArrayLike] | list[tuple[str, ArrayLike] | CellBlock],
    point_data: dict[str, ArrayLike] | None = None,
    cell_data: dict[str, list[ArrayLike]] | None = None,
    field_data=None,
    point_sets: dict[str, ArrayLike] | None = None,
    cell_sets: dict[str, list[ArrayLike]] | None = None,
    file_format: str | None = None,
    **kwargs,
):
    """Convenience wrapper: build a Mesh from raw arrays and write it.

    All keyword arguments beyond `file_format` are forwarded to the
    format-specific writer.
    """
    points = np.asarray(points)
    mesh = Mesh(
        points,
        cells,
        point_data=point_data,
        cell_data=cell_data,
        field_data=field_data,
        point_sets=point_sets,
        cell_sets=cell_sets,
    )
    mesh.write(filename, file_format=file_format, **kwargs)
def write(filename, mesh: Mesh, file_format: str | None = None, **kwargs):
    """Write a mesh, together with its data, to a file.

    :param filename: path or writable buffer to write to
    :param mesh: the mesh to write
    :param file_format: explicit format name; mandatory when `filename` is
        a buffer, otherwise deduced from the extension if omitted
    :raises WriteError: for unknown formats or inconsistent cell shapes
    """
    # BUG FIX: the buffer check previously used mode "r" (probing for a
    # `read` attribute); a write target must be probed for `write`.
    if is_buffer(filename, "w"):
        if file_format is None:
            raise WriteError("File format must be supplied if `filename` is a buffer")
        if file_format == "tetgen":
            raise WriteError(
                "tetgen format is spread across multiple files, and so cannot be written to a buffer"
            )
    else:
        path = Path(filename)
        if not file_format:
            # deduce possible file formats from extension
            file_formats = _filetypes_from_path(path)
            # just take the first one
            file_format = file_formats[0]

    try:
        writer = _writer_map[file_format]
    except KeyError:
        formats = sorted(list(_writer_map.keys()))
        raise WriteError(f"Unknown format '{file_format}'. Pick one of {formats}")

    # check cells for sanity: column count must match the cell type
    for cell_block in mesh.cells:
        key = cell_block.type
        value = cell_block.data
        if key in num_nodes_per_cell:
            if value.shape[1] != num_nodes_per_cell[key]:
                raise WriteError(
                    f"Unexpected cells array shape {value.shape} for {key} cells. "
                    + f"Expected shape [:, {num_nodes_per_cell[key]}]."
                )
        else:
            # we allow custom keys and cannot check those
            pass

    # Write
    return writer(filename, mesh, **kwargs)
src/meshio/_mesh.py 0000664 0000000 0000000 00000033711 14562440725 0014614 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import copy
import numpy as np
from numpy.typing import ArrayLike
from ._common import num_nodes_per_cell, warn
# Topological dimension of each cell type:
# 0 = point, 1 = curve, 2 = surface, 3 = volume.
topological_dimension = {
    "line": 1,
    "polygon": 2,
    "triangle": 2,
    "quad": 2,
    "tetra": 3,
    "hexahedron": 3,
    "wedge": 3,
    "pyramid": 3,
    "line3": 1,
    "triangle6": 2,
    "quad9": 2,
    "tetra10": 3,
    "hexahedron27": 3,
    "wedge18": 3,
    "pyramid14": 3,
    "vertex": 0,
    "quad8": 2,
    "hexahedron20": 3,
    "triangle10": 2,
    "triangle15": 2,
    "triangle21": 2,
    "line4": 1,
    "line5": 1,
    "line6": 1,
    "tetra20": 3,
    "tetra35": 3,
    "tetra56": 3,
    "quad16": 2,
    "quad25": 2,
    "quad36": 2,
    "triangle28": 2,
    "triangle36": 2,
    "triangle45": 2,
    "triangle55": 2,
    "triangle66": 2,
    "quad49": 2,
    "quad64": 2,
    "quad81": 2,
    "quad100": 2,
    "quad121": 2,
    "line7": 1,
    "line8": 1,
    "line9": 1,
    "line10": 1,
    "line11": 1,
    "tetra84": 3,
    "tetra120": 3,
    "tetra165": 3,
    "tetra220": 3,
    "tetra286": 3,
    "wedge40": 3,
    "wedge75": 3,
    "hexahedron64": 3,
    "hexahedron125": 3,
    "hexahedron216": 3,
    "hexahedron343": 3,
    "hexahedron512": 3,
    "hexahedron729": 3,
    "hexahedron1000": 3,
    "wedge126": 3,
    "wedge196": 3,
    "wedge288": 3,
    "wedge405": 3,
    "wedge550": 3,
    "VTK_LAGRANGE_CURVE": 1,
    "VTK_LAGRANGE_TRIANGLE": 2,
    "VTK_LAGRANGE_QUADRILATERAL": 2,
    "VTK_LAGRANGE_TETRAHEDRON": 3,
    "VTK_LAGRANGE_HEXAHEDRON": 3,
    "VTK_LAGRANGE_WEDGE": 3,
    "VTK_LAGRANGE_PYRAMID": 3,
}
class CellBlock:
    """A homogeneous block of cells of one type, plus optional tags."""

    def __init__(
        self,
        cell_type: str,
        data: list | np.ndarray,
        tags: list[str] | None = None,
    ):
        self.type = cell_type
        self.data = data
        if cell_type.startswith("polyhedron"):
            # polyhedron cells are ragged; keep the nested lists as-is
            self.dim = 3
        else:
            self.data = np.asarray(self.data)
            self.dim = topological_dimension[cell_type]
        self.tags = tags if tags is not None else []

    def __repr__(self):
        return (
            "<meshio CellBlock, "
            f"type: {self.type}, "
            f"num cells: {len(self.data)}, "
            f"tags: {self.tags}>"
        )

    def __len__(self):
        return len(self.data)
class Mesh:
    def __init__(
        self,
        points: ArrayLike,
        cells: dict[str, ArrayLike] | list[tuple[str, ArrayLike] | CellBlock],
        point_data: dict[str, ArrayLike] | None = None,
        cell_data: dict[str, list[ArrayLike]] | None = None,
        field_data=None,
        point_sets: dict[str, ArrayLike] | None = None,
        cell_sets: dict[str, list[ArrayLike]] | None = None,
        gmsh_periodic=None,
        info=None,
    ):
        """Container for an unstructured mesh: points, cell blocks, and data.

        `cells` may be a legacy dict or a list of `(type, connectivity)`
        tuples / `CellBlock`s.  Point and cell data lengths are validated
        against the number of points / cells.
        """
        self.points = np.asarray(points)
        if isinstance(cells, dict):
            # Let's not deprecate this for now.
            # warn(
            #     "cell dictionaries are deprecated, use list of tuples, e.g., "
            #     '[("triangle", [[0, 1, 2], ...])]',
            #     DeprecationWarning,
            # )
            # old dict, deprecated
            #
            # convert dict to list of tuples
            cells = list(cells.items())

        self.cells = []
        for cell_block in cells:
            if isinstance(cell_block, tuple):
                cell_type, data = cell_block
                cell_block = CellBlock(
                    cell_type,
                    # polyhedron data cannot be converted to numpy arrays
                    # because the sublists don't all have the same length
                    data if cell_type.startswith("polyhedron") else np.asarray(data),
                )
            self.cells.append(cell_block)

        self.point_data = {} if point_data is None else point_data
        self.cell_data = {} if cell_data is None else cell_data
        self.field_data = {} if field_data is None else field_data
        self.point_sets = {} if point_sets is None else point_sets
        self.cell_sets = {} if cell_sets is None else cell_sets
        self.gmsh_periodic = gmsh_periodic
        self.info = info

        # assert point data consistency and convert to numpy arrays
        for key, item in self.point_data.items():
            self.point_data[key] = np.asarray(item)
            if len(self.point_data[key]) != len(self.points):
                raise ValueError(
                    f"len(points) = {len(self.points)}, "
                    f'but len(point_data["{key}"]) = {len(self.point_data[key])}'
                )

        # assert cell data consistency and convert to numpy arrays
        for key, data in self.cell_data.items():
            if len(data) != len(cells):
                raise ValueError(
                    f"Incompatible cell data '{key}'. "
                    f"{len(cells)} cell blocks, but '{key}' has {len(data)} blocks."
                )

            for k in range(len(data)):
                data[k] = np.asarray(data[k])
                if len(data[k]) != len(self.cells[k]):
                    raise ValueError(
                        "Incompatible cell data. "
                        + f"Cell block {k} ('{self.cells[k].type}') "
                        + f"has length {len(self.cells[k])}, but "
                        + f"corresponding cell data item has length {len(data[k])}."
                    )
def __repr__(self):
    """Return a multi-line, human-readable summary of the mesh.

    Lists the point count, the cell count per block (with the number of
    nodes per cell appended for variable-size types), and the names of any
    point/cell sets and point/cell/field data.
    """
    lines = ["", f" Number of points: {len(self.points)}"]
    # Cell types whose per-cell node count is not fixed by the type name
    # alone; for these the node count (data.shape[1]) is shown as well.
    special_cells = [
        "polygon",
        "polyhedron",
        "VTK_LAGRANGE_CURVE",
        "VTK_LAGRANGE_TRIANGLE",
        "VTK_LAGRANGE_QUADRILATERAL",
        "VTK_LAGRANGE_TETRAHEDRON",
        "VTK_LAGRANGE_HEXAHEDRON",
        "VTK_LAGRANGE_WEDGE",
        "VTK_LAGRANGE_PYRAMID",
    ]
    if len(self.cells) > 0:
        lines.append(" Number of cells:")
        for cell_block in self.cells:
            string = cell_block.type
            if cell_block.type in special_cells:
                string += f"({cell_block.data.shape[1]})"
            lines.append(f" {string}: {len(cell_block)}")
    else:
        lines.append(" No cells.")
    if self.point_sets:
        names = ", ".join(self.point_sets.keys())
        lines.append(f" Point sets: {names}")
    if self.cell_sets:
        names = ", ".join(self.cell_sets.keys())
        lines.append(f" Cell sets: {names}")
    if self.point_data:
        names = ", ".join(self.point_data.keys())
        lines.append(f" Point data: {names}")
    if self.cell_data:
        names = ", ".join(self.cell_data.keys())
        lines.append(f" Cell data: {names}")
    if self.field_data:
        names = ", ".join(self.field_data.keys())
        lines.append(f" Field data: {names}")
    return "\n".join(lines)
def copy(self):
    """Return a deep copy of the mesh (points, cells, and all data dicts)."""
    return copy.deepcopy(self)
def write(self, path_or_buf, file_format: str | None = None, **kwargs):
    """Write the mesh to a file path or buffer.

    Delegates to ``meshio.write``; when ``file_format`` is None the format
    is deduced there (e.g. from the file extension). Extra keyword
    arguments are passed through to the format-specific writer.
    """
    # avoid circular import
    from ._helpers import write

    write(path_or_buf, self, file_format, **kwargs)
def get_cells_type(self, cell_type: str):
    """Return the stacked connectivity of all cell blocks of ``cell_type``."""
    matching = [block.data for block in self.cells if block.type == cell_type]
    if not matching:
        # No block of this type: empty array with the correct column count.
        return np.empty((0, num_nodes_per_cell[cell_type]), dtype=int)
    return np.concatenate(matching)
def get_cell_data(self, name: str, cell_type: str):
    """Return cell data ``name`` for all blocks of ``cell_type``, stacked."""
    selected = []
    for block, values in zip(self.cells, self.cell_data[name]):
        if block.type == cell_type:
            selected.append(values)
    return np.concatenate(selected)
@property
def cells_dict(self):
    """Cells grouped by type: ``{cell_type: concatenated connectivity}``."""
    grouped = {}
    for block in self.cells:
        grouped.setdefault(block.type, []).append(block.data)
    # concatenate the per-type chunks
    return {tp: np.concatenate(chunks) for tp, chunks in grouped.items()}
@property
def cell_data_dict(self):
    """Cell data regrouped by cell type: ``{name: {cell_type: array}}``."""
    out = {}
    for name, value_list in self.cell_data.items():
        per_type = {}
        # value_list is parallel to self.cells: one array per cell block
        for values, block in zip(value_list, self.cells):
            per_type.setdefault(block.type, []).append(values)
        out[name] = {tp: np.concatenate(v) for tp, v in per_type.items()}
    return out
@property
def cell_sets_dict(self):
    """Cell sets regrouped by cell type.

    Returns ``{set_name: {cell_type: member indices}}`` where the member
    indices are local to the concatenated cells of that type (the same
    numbering as ``cells_dict``). Blocks whose entry in a set is None are
    skipped; types whose collected members are all empty are dropped.
    """
    sets_dict = {}
    for key, member_list in self.cell_sets.items():
        sets_dict[key] = {}
        # running offset into the concatenated connectivity, per cell type
        offsets = {}
        for members, cells in zip(member_list, self.cells):
            if members is None:
                continue
            if cells.type in offsets:
                offset = offsets[cells.type]
                offsets[cells.type] += cells.data.shape[0]
            else:
                offset = 0
                offsets[cells.type] = cells.data.shape[0]
            if cells.type in sets_dict[key]:
                sets_dict[key][cells.type].append(members + offset)
            else:
                sets_dict[key][cells.type] = [members + offset]
    return {
        key: {
            cell_type: np.concatenate(members)
            for cell_type, members in sets.items()
            # drop types with no members at all
            if sum(map(np.size, members))
        }
        for key, sets in sets_dict.items()
    }
@classmethod
def read(cls, path_or_buf, file_format=None):
    """Read a mesh from a file or buffer.

    Deprecated: use ``meshio.read`` instead.
    """
    # avoid circular import
    from ._helpers import read

    # 2021-02-21
    warn("meshio.Mesh.read is deprecated, use meshio.read instead")
    return read(path_or_buf, file_format)
def cell_sets_to_data(self, data_name: str | None = None, join_char: str = "-"):
    """Convert ``cell_sets`` into a single integer cell-data field.

    Each cell is assigned the index of the set it belongs to; cells that
    appear in no set get the default value -1 (a warning is emitted once).
    If ``data_name`` is None, the field name is the set names joined by
    ``join_char`` (default "-", matching ``point_sets_to_data`` for
    consistency). ``cell_sets`` is cleared afterwards. No-op when there
    are no cell sets.
    """
    # If possible, convert cell sets to integer cell data. This is possible if all
    # cells appear exactly in one group.
    default_value = -1
    if len(self.cell_sets) > 0:
        intfun = []
        # zip(*values) walks the sets block by block (k-th entry of every set)
        for k, c in enumerate(zip(*self.cell_sets.values())):
            # Go for -1 as the default value. (NaN is not int.)
            arr = np.full(len(self.cells[k]), default_value, dtype=int)
            for i, cc in enumerate(c):
                if cc is None:
                    continue
                arr[cc] = i
            intfun.append(arr)

        # warn (at most once) if some cells belong to no set
        for item in intfun:
            num_default = np.sum(item == default_value)
            if num_default > 0:
                warn(
                    f"{num_default} cells are not part of any cell set. "
                    f"Using default value {default_value}."
                )
                break

        if data_name is None:
            data_name = join_char.join(self.cell_sets.keys())
        self.cell_data[data_name] = intfun
        self.cell_sets = {}
def point_sets_to_data(self, join_char: str = "-") -> None:
    """Convert ``point_sets`` into a single integer point-data array.

    Each point gets the index of the set it belongs to; points in no set
    keep the default value -1 (a warning is emitted). The data name is the
    set names joined by ``join_char``. ``point_sets`` is cleared. No-op
    when there are no point sets.
    """
    if not self.point_sets:
        return
    # Go for -1 as the default value. (NaN is not int.)
    fill = -1
    labels = np.full(len(self.points), fill, dtype=int)
    for index, members in enumerate(self.point_sets.values()):
        labels[members] = index
    if np.any(labels == fill):
        warn(
            "Not all points are part of a point set. "
            f"Using default value {fill}."
        )
    self.point_data[join_char.join(self.point_sets.keys())] = labels
    self.point_sets = {}
# This used to be int_data_to_sets(), converting _all_ cell and point data.
# This is not useful in many cases, as one usually only wants one
# particular data array (e.g., "MaterialIDs") converted to sets.
def cell_data_to_sets(self, key: str):
    """Convert cell_data[key] to cell_sets.

    The data must be integer-valued. Each distinct tag becomes one cell
    set. Set names come from splitting ``key`` on "-" (the inverse of how
    sets_to_int_data() forms the key) when the count matches; otherwise
    generated names "set-<key>-<tag>" are used. The cell data entry is
    removed afterwards.
    """
    data = self.cell_data[key]

    # handle all int and uint data
    if not all(v.dtype.kind in ["i", "u"] for v in data):
        raise RuntimeError(f"cell_data['{key}'] is not int data.")

    tags = np.unique(np.concatenate(data))

    # try and get the names by splitting the key along "-" (this is how
    # sets_to_int_data() forms the key)
    names = key.split("-")
    # remove duplicates and preserve order
    names = list(dict.fromkeys(names))
    if len(names) != len(tags):
        # alternative names
        names = [f"set-{key}-{tag}" for tag in tags]

    # TODO there's probably a better way besides np.where, something from
    # np.unique or np.sort
    for name, tag in zip(names, tags):
        self.cell_sets[name] = [np.where(d == tag)[0] for d in data]

    # remove the cell data
    del self.cell_data[key]
def point_data_to_sets(self, key: str):
    """Convert point_data[key] to point_sets.

    The data must be integer-valued. Each distinct tag becomes one point
    set. Set names come from splitting ``key`` on "-" (the inverse of how
    sets_to_int_data() forms the key) when the count matches; otherwise
    generated names "set-<key>-<tag>" are used. The point data entry is
    removed afterwards.
    """
    data = self.point_data[key]

    # handle all int and uint data
    if not all(v.dtype.kind in ["i", "u"] for v in data):
        raise RuntimeError(f"point_data['{key}'] is not int data.")

    tags = np.unique(data)

    # try and get the names by splitting the key along "-" (this is how
    # sets_to_int_data() forms the key)
    names = key.split("-")
    # remove duplicates and preserve order
    names = list(dict.fromkeys(names))
    if len(names) != len(tags):
        # alternative names
        # bug fix: was f"set-key-{tag}" (literal "key"); now interpolates
        # the actual key, consistent with cell_data_to_sets()
        names = [f"set-{key}-{tag}" for tag in tags]

    # TODO there's probably a better way besides np.where, something from
    # np.unique or np.sort
    for name, tag in zip(names, tags):
        self.point_sets[name] = np.where(data == tag)[0]

    # remove the point data
    del self.point_data[key]
src/meshio/_vtk_common.py 0000664 0000000 0000000 00000015721 14562440725 0016035 0 ustar 00root root 0000000 0000000 import numpy as np
from ._common import num_nodes_per_cell, warn
from ._exceptions import ReadError
from ._mesh import CellBlock
# https://vtk.org/doc/nightly/html/vtkCellType_8h_source.html
# Map from integer VTK cell-type codes (see vtkCellType.h) to meshio
# cell-type strings. Commented entries are VTK types meshio does not handle.
vtk_to_meshio_type = {
    0: "empty",
    1: "vertex",
    # 2: 'poly_vertex',
    3: "line",
    # 4: 'poly_line',
    5: "triangle",
    # 6: 'triangle_strip',
    7: "polygon",
    8: "pixel",
    9: "quad",
    10: "tetra",
    # 11: 'voxel',
    12: "hexahedron",
    13: "wedge",
    14: "pyramid",
    15: "penta_prism",
    16: "hexa_prism",
    21: "line3",
    22: "triangle6",
    23: "quad8",
    24: "tetra10",
    25: "hexahedron20",
    26: "wedge15",
    27: "pyramid13",
    28: "quad9",
    29: "hexahedron27",
    30: "quad6",
    31: "wedge12",
    32: "wedge18",
    33: "hexahedron24",
    34: "triangle7",
    35: "line4",
    42: "polyhedron",
    #
    # 60: VTK_HIGHER_ORDER_EDGE,
    # 61: VTK_HIGHER_ORDER_TRIANGLE,
    # 62: VTK_HIGHER_ORDER_QUAD,
    # 63: VTK_HIGHER_ORDER_POLYGON,
    # 64: VTK_HIGHER_ORDER_TETRAHEDRON,
    # 65: VTK_HIGHER_ORDER_WEDGE,
    # 66: VTK_HIGHER_ORDER_PYRAMID,
    # 67: VTK_HIGHER_ORDER_HEXAHEDRON,
    # Arbitrary order Lagrange elements
    68: "VTK_LAGRANGE_CURVE",
    69: "VTK_LAGRANGE_TRIANGLE",
    70: "VTK_LAGRANGE_QUADRILATERAL",
    71: "VTK_LAGRANGE_TETRAHEDRON",
    72: "VTK_LAGRANGE_HEXAHEDRON",
    73: "VTK_LAGRANGE_WEDGE",
    74: "VTK_LAGRANGE_PYRAMID",
    # Arbitrary order Bezier elements
    75: "VTK_BEZIER_CURVE",
    76: "VTK_BEZIER_TRIANGLE",
    77: "VTK_BEZIER_QUADRILATERAL",
    78: "VTK_BEZIER_TETRAHEDRON",
    79: "VTK_BEZIER_HEXAHEDRON",
    80: "VTK_BEZIER_WEDGE",
    81: "VTK_BEZIER_PYRAMID",
}
# Inverse map: meshio cell-type string -> VTK integer code.
meshio_to_vtk_type = {v: k for k, v in vtk_to_meshio_type.items()}
def vtk_to_meshio_order(vtk_type, dtype=int):
    """Return the node permutation from VTK to meshio ordering, or None.

    meshio uses the same node ordering as VTK for most cell types. However,
    for the linear wedge (VTK type 13), the ordering of the gmsh Prism [1]
    is adopted since this is found in most codes (Abaqus, Ansys, Nastran,
    ...). In the vtkWedge [2], the normal of the (0,1,2) triangle points
    outwards, while in gmsh this normal points inwards.

    [1] http://gmsh.info/doc/texinfo/gmsh.html#Node-ordering
    [2] https://vtk.org/doc/nightly/html/classvtkWedge.html
    """
    if vtk_type != 13:
        return None
    return np.array([0, 2, 1, 3, 5, 4], dtype=dtype)
def meshio_to_vtk_order(meshio_type, dtype=int):
    """Return the node permutation from meshio to VTK ordering, or None.

    Only the linear wedge differs (see vtk_to_meshio_order); the
    permutation is its own inverse.
    """
    if meshio_type != "wedge":
        return None
    return np.array([0, 2, 1, 3, 5, 4], dtype=dtype)
def vtk_cells_from_data(connectivity, offsets, types, cell_data_raw):
    """Group a flat VTK cell array into meshio cell blocks.

    Translate it into the cells array.
    `connectivity` is a one-dimensional vector with
    (p00, p01, ... ,p0k, p10, p11, ..., p1k, ...
    `offsets` is a pointer array that points to the first position of p0, p1, etc.

    Returns ``(cells, cell_data)`` where cells is a list of CellBlock and
    cell_data maps each name to a list of per-block arrays.
    """
    if len(offsets) != len(types):
        raise ReadError(f"len(offsets) != len(types) ({len(offsets)} != {len(types)})")

    # identify cell blocks: positions where the VTK type changes
    breaks = np.where(types[:-1] != types[1:])[0] + 1
    # all cells with indices between start[k] and end[k] have the same type
    start_end = list(
        zip(
            np.concatenate([[0], breaks]),
            np.concatenate([breaks, [len(types)]]),
        )
    )

    cells = []
    cell_data = {}

    for start, end in start_end:
        try:
            meshio_type = vtk_to_meshio_type[types[start]]
        except KeyError:
            warn(
                f"File contains cells that meshio cannot handle (type {types[start]})."
            )
            continue

        # cells with varying number of points
        special_cells = [
            "polygon",
            "VTK_LAGRANGE_CURVE",
            "VTK_LAGRANGE_TRIANGLE",
            "VTK_LAGRANGE_QUADRILATERAL",
            "VTK_LAGRANGE_TETRAHEDRON",
            "VTK_LAGRANGE_HEXAHEDRON",
            "VTK_LAGRANGE_WEDGE",
            "VTK_LAGRANGE_PYRAMID",
        ]
        if meshio_type in special_cells:
            # Polygons have unknown and varying number of nodes per cell.

            # Index where the previous block of cells stopped. Needed to know the number
            # of nodes for the first cell in the block.
            first_node = 0 if start == 0 else offsets[start - 1]

            # Start off the cell-node relation for each cell in this block
            start_cn = np.hstack((first_node, offsets[start:end]))
            # Find the size of each cell except the last
            sizes = np.diff(start_cn)

            # find where the cell blocks start and end
            b = np.diff(sizes)
            c = np.concatenate([[0], np.where(b != 0)[0] + 1, [len(sizes)]])

            # Loop over all cell sizes, find all cells with this size, and assign
            # connectivity
            for cell_block_start, cell_block_end in zip(c, c[1:]):
                items = np.arange(cell_block_start, cell_block_end)
                sz = sizes[cell_block_start]
                new_order = vtk_to_meshio_order(types[start], dtype=offsets.dtype)
                if new_order is None:
                    new_order = np.arange(sz, dtype=offsets.dtype)
                # offsets point one past the cell end, so index backwards
                new_order -= sz
                indices = np.add.outer(start_cn[items + 1], new_order)
                cells.append(CellBlock(meshio_type, connectivity[indices]))

                # Store cell data for this set of cells
                for name, d in cell_data_raw.items():
                    if name not in cell_data:
                        cell_data[name] = []
                    cell_data[name].append(d[start + items])
        else:
            # Non-polygonal cell. Same number of nodes per cell makes everything easier.
            n = num_nodes_per_cell[meshio_type]
            new_order = vtk_to_meshio_order(types[start], dtype=offsets.dtype)
            if new_order is None:
                new_order = np.arange(n, dtype=offsets.dtype)
            new_order -= n
            indices = np.add.outer(offsets[start:end], new_order)
            cells.append(CellBlock(meshio_type, connectivity[indices]))
            for name, d in cell_data_raw.items():
                if name not in cell_data:
                    cell_data[name] = []
                cell_data[name].append(d[start:end])

    return cells, cell_data
class Info:
    """Mutable state container used while parsing a VTK file."""

    def __init__(self):
        # geometry / topology accumulated so far
        self.points = None
        self.connectivity = None
        self.offsets = None
        self.types = None
        # data arrays
        self.field_data = {}
        self.cell_data_raw = {}
        self.point_data = {}
        self.dataset = {}
        # parser bookkeeping
        self.active = None
        self.is_ascii = False
        self.split = []
        self.num_items = 0
        # One of the problems in reading VTK files are POINT_DATA and
        # CELL_DATA fields. They can contain a number of
        # SCALARS+LOOKUP_TABLE tables, without giving an indication of how
        # many there are. Hence, SCALARS must be treated like a first-class
        # section. To associate it with POINT/CELL_DATA, we store the
        # `active` section in this variable.
        self.section = None
src/meshio/abaqus/ 0000775 0000000 0000000 00000000000 14562440725 0014416 5 ustar 00root root 0000000 0000000 src/meshio/abaqus/__init__.py 0000664 0000000 0000000 00000000076 14562440725 0016532 0 ustar 00root root 0000000 0000000 from ._abaqus import read, write
__all__ = ["read", "write"]
src/meshio/abaqus/_abaqus.py 0000664 0000000 0000000 00000032442 14562440725 0016410 0 ustar 00root root 0000000 0000000 """
I/O for Abaqus inp files.
"""
import pathlib
from itertools import count
import numpy as np
from ..__about__ import __version__
from .._common import num_nodes_per_cell
from .._exceptions import ReadError
from .._files import open_file
from .._helpers import register_format
from .._mesh import CellBlock, Mesh
# Map from Abaqus element-type names to meshio cell-type strings.
# Commented entries are unsupported or tentative.
abaqus_to_meshio_type = {
    # trusses
    "T2D2": "line",
    "T2D2H": "line",
    "T2D3": "line3",
    "T2D3H": "line3",
    "T3D2": "line",
    "T3D2H": "line",
    "T3D3": "line3",
    "T3D3H": "line3",
    # beams
    "B21": "line",
    "B21H": "line",
    "B22": "line3",
    "B22H": "line3",
    "B31": "line",
    "B31H": "line",
    "B32": "line3",
    "B32H": "line3",
    "B33": "line3",
    "B33H": "line3",
    # surfaces
    "CPS4": "quad",
    "CPS4R": "quad",
    "S4": "quad",
    "S4R": "quad",
    "S4RS": "quad",
    "S4RSW": "quad",
    "S4R5": "quad",
    "S8R": "quad8",
    "S8R5": "quad8",
    "S9R5": "quad9",
    # "QUAD": "quad",
    # "QUAD4": "quad",
    # "QUAD5": "quad5",
    # "QUAD8": "quad8",
    # "QUAD9": "quad9",
    #
    "CPS3": "triangle",
    "STRI3": "triangle",
    "S3": "triangle",
    "S3R": "triangle",
    "S3RS": "triangle",
    "R3D3": "triangle",
    # "TRI7": "triangle7",
    # 'TRISHELL': 'triangle',
    # 'TRISHELL3': 'triangle',
    # 'TRISHELL7': 'triangle',
    #
    "STRI65": "triangle6",
    # 'TRISHELL6': 'triangle6',
    # volumes
    "C3D8": "hexahedron",
    "C3D8H": "hexahedron",
    "C3D8I": "hexahedron",
    "C3D8IH": "hexahedron",
    "C3D8R": "hexahedron",
    "C3D8RH": "hexahedron",
    # "HEX9": "hexahedron9",
    "C3D20": "hexahedron20",
    "C3D20H": "hexahedron20",
    "C3D20R": "hexahedron20",
    "C3D20RH": "hexahedron20",
    # "HEX27": "hexahedron27",
    #
    "C3D4": "tetra",
    # NOTE(review): "tetra4" (not "tetra") looks inconsistent with C3D4 —
    # verify this is intentional before changing.
    "C3D4H": "tetra4",
    # "TETRA8": "tetra8",
    "C3D10": "tetra10",
    "C3D10H": "tetra10",
    "C3D10I": "tetra10",
    "C3D10M": "tetra10",
    "C3D10MH": "tetra10",
    # "TETRA14": "tetra14",
    #
    # "PYRAMID": "pyramid",
    "C3D6": "wedge",
    "C3D15": "wedge15",
    #
    # 4-node bilinear displacement and pore pressure
    "CAX4P": "quad",
    # 6-node quadratic
    "CPE6": "triangle6",
}
# Inverse map for writing; note this keeps only one Abaqus name per meshio
# type (the last one inserted above wins).
meshio_to_abaqus_type = {v: k for k, v in abaqus_to_meshio_type.items()}
def read(filename):
    """Read an Abaqus .inp file and return a meshio Mesh."""
    with open_file(filename, "r") as f:
        return read_buffer(f)
def read_buffer(f):
    """Parse an Abaqus .inp file from an open text buffer into a Mesh.

    Handles the keywords NODE, ELEMENT, NSET, ELSET and INCLUDE; every
    other keyword block is skipped line by line. Abaqus ids are 1-based
    and arbitrary; they are remapped to 0-based contiguous indices via
    point_ids / cell_ids.
    """
    # Initialize the optional data fields
    points = []
    cells = []
    cell_ids = []
    point_sets = {}
    cell_sets = {}
    cell_sets_element = {}  # Handle cell sets defined in ELEMENT
    cell_sets_element_order = []  # Order of keys is not preserved in Python 3.5
    field_data = {}
    cell_data = {}
    point_data = {}
    point_ids = None

    line = f.readline()
    while True:
        if not line:  # EOF
            break

        # Comments
        if line.startswith("**"):
            line = f.readline()
            continue

        keyword = line.partition(",")[0].strip().replace("*", "").upper()
        if keyword == "NODE":
            points, point_ids, line = _read_nodes(f)
        elif keyword == "ELEMENT":
            if point_ids is None:
                raise ReadError("Expected NODE before ELEMENT")
            params_map = get_param_map(line, required_keys=["TYPE"])
            cell_type, cells_data, ids, sets, line = _read_cells(
                f, params_map, point_ids
            )
            cells.append(CellBlock(cell_type, cells_data))
            cell_ids.append(ids)
            if sets:
                cell_sets_element.update(sets)
                cell_sets_element_order += list(sets.keys())
        elif keyword == "NSET":
            params_map = get_param_map(line, required_keys=["NSET"])
            set_ids, _, line = _read_set(f, params_map)
            name = params_map["NSET"]
            # remap Abaqus point ids to 0-based point indices
            point_sets[name] = np.array(
                [point_ids[point_id] for point_id in set_ids], dtype="int32"
            )
        elif keyword == "ELSET":
            params_map = get_param_map(line, required_keys=["ELSET"])
            set_ids, set_names, line = _read_set(f, params_map)
            name = params_map["ELSET"]
            cell_sets[name] = []
            if set_ids.size:
                # numeric set: map element ids into per-block cell indices
                for cell_ids_ in cell_ids:
                    cell_sets_ = np.array(
                        [
                            cell_ids_[set_id]
                            for set_id in set_ids
                            if set_id in cell_ids_
                        ],
                        dtype="int32",
                    )
                    cell_sets[name].append(cell_sets_)
            elif set_names:
                # set defined by referencing other (already-read) set names
                for set_name in set_names:
                    if set_name in cell_sets.keys():
                        cell_sets[name].append(cell_sets[set_name])
                    elif set_name in cell_sets_element.keys():
                        cell_sets[name].append(cell_sets_element[set_name])
                    else:
                        raise ReadError(f"Unknown cell set '{set_name}'")
        elif keyword == "INCLUDE":
            # Splitting line to get external input file path (example: *INCLUDE,INPUT=wInclude_bulk.inp)
            ext_input_file = pathlib.Path(line.split("=")[-1].strip())
            if ext_input_file.exists() is False:
                # fall back to a path relative to the current file
                cd = pathlib.Path(f.name).parent
                ext_input_file = cd / ext_input_file

            # Read contents from external input file into mesh object
            out = read(ext_input_file)

            # Merge contents of external file only if it is containing mesh data
            if len(out.points) > 0:
                points, cells = merge(
                    out,
                    points,
                    cells,
                    point_data,
                    cell_data,
                    field_data,
                    point_sets,
                    cell_sets,
                )

            line = f.readline()
        else:
            # There are just too many Abaqus keywords to explicitly skip them.
            line = f.readline()

    # Parse cell sets defined in ELEMENT
    for i, name in enumerate(cell_sets_element_order):
        # Not sure whether this case would ever happen
        if name in cell_sets.keys():
            cell_sets[name][i] = cell_sets_element[name]
        else:
            cell_sets[name] = []
            for ic in range(len(cells)):
                cell_sets[name].append(
                    cell_sets_element[name] if i == ic else np.array([], dtype="int32")
                )

    return Mesh(
        points,
        cells,
        point_data=point_data,
        cell_data=cell_data,
        field_data=field_data,
        point_sets=point_sets,
        cell_sets=cell_sets,
    )
def _read_nodes(f):
    """Read a *NODE data block.

    Returns (points array, {Abaqus node id -> 0-based row index}, the line
    that terminated the block).
    """
    coords = []
    id_map = {}
    row = 0
    while True:
        line = f.readline()
        # the block ends at EOF or at the next keyword line
        if not line or line.startswith("*"):
            break
        if line.strip() == "":
            continue
        fields = line.strip().split(",")
        id_map[int(fields[0])] = row
        coords.append([float(x) for x in fields[1:]])
        row += 1
    return np.array(coords, dtype=float), id_map, line
def _read_cells(f, params_map, point_ids):
    """Read an *ELEMENT data block.

    Returns (meshio cell type, 0-based connectivity array,
    {Abaqus element id -> row index}, cell sets declared via an ELSET
    parameter on the keyword line, the line that terminated the block).
    """
    etype = params_map["TYPE"]
    if etype not in abaqus_to_meshio_type.keys():
        raise ReadError(f"Element type not available: {etype}")

    cell_type = abaqus_to_meshio_type[etype]
    # ElementID + NodesIDs
    num_data = num_nodes_per_cell[cell_type] + 1

    idx = []
    while True:
        line = f.readline()
        if not line or line.startswith("*"):
            break
        line = line.strip()
        if line == "":
            continue
        # entries may continue over multiple lines; empty fields are dropped
        idx += [int(k) for k in filter(None, line.split(","))]

    # Check for expected number of data
    if len(idx) % num_data != 0:
        raise ReadError("Expected number of data items does not match element type")

    idx = np.array(idx).reshape((-1, num_data))
    # first column: Abaqus element ids -> running 0-based index
    cell_ids = dict(zip(idx[:, 0], count(0)))
    # remaining columns: node ids -> 0-based point indices
    cells = np.array([[point_ids[node] for node in elem] for elem in idx[:, 1:]])

    cell_sets = (
        {params_map["ELSET"]: np.arange(len(cells), dtype="int32")}
        if "ELSET" in params_map.keys()
        else {}
    )

    return cell_type, cells, cell_ids, cell_sets, line
def merge(
    mesh, points, cells, point_data, cell_data, field_data, point_sets, cell_sets
):
    """
    Merge Mesh object into existing containers for points, cells, sets, etc..

    :param mesh: incoming mesh to merge in
    :param points: existing point coordinates
    :param cells: existing list of cell blocks (extended in place)
    :param point_data:
    :param cell_data:
    :param field_data:
    :param point_sets: existing point sets (updated in place)
    :param cell_sets:
    :type mesh: Mesh
    """
    incoming_points = np.array([p for p in mesh.points])
    if len(points) > 0:
        point_offset = points.shape[0]
        points = np.concatenate([points, incoming_points])
    else:
        point_offset = 0
        points = incoming_points

    # append the incoming cell blocks with node indices shifted past the
    # existing points
    for block in mesh.cells:
        shifted = np.array([row + point_offset for row in block.data])
        cells.append(CellBlock(block.type, shifted))

    # point_data / cell_data / field_data aren't currently filled by the
    # Abaqus parser and are therefore not merged here.

    # Update point sets to account for the change in point ids
    for name, ids in mesh.point_sets.items():
        point_sets[name] = [x + point_offset for x in ids]

    # Todo: Add support for merging cell sets

    return points, cells
def get_param_map(word, required_keys=None):
    """
    get the optional arguments on a line

    Example
    -------
    >>> word = 'elset,instance=dummy2,generate'
    >>> params = get_param_map(word, required_keys=['instance'])
    params = {
        'elset' : None,
        'instance' : 'dummy2,
        'generate' : None,
    }
    """
    required_keys = [] if required_keys is None else required_keys

    param_map = {}
    for token in word.split(","):
        if "=" not in token:
            # bare flag, e.g. "generate"
            param_map[token.strip().upper()] = None
        else:
            parts = token.split("=")
            if len(parts) != 2:
                raise ReadError(parts)
            param_map[parts[0].strip().upper()] = parts[1].strip()

    missing = [key for key in required_keys if key not in param_map]
    if missing:
        raise RuntimeError("".join(f"{key} not found in {word}\n" for key in missing))
    return param_map
def _read_set(f, params_map):
    """Read an *NSET/*ELSET data block.

    Returns (numeric ids as an int32 array, referenced set names, the line
    that terminated the block). When the keyword carries GENERATE, the
    three ids (first, last, stride) are expanded into a full range.
    """
    set_ids = []
    set_names = []
    while True:
        line = f.readline()
        if not line or line.startswith("*"):
            break
        if line.strip() == "":
            continue

        line = line.strip().strip(",").split(",")
        if line[0].isnumeric():
            set_ids += [int(k) for k in line]
        else:
            # non-numeric entries reference other sets by name
            set_names.append(line[0])

    set_ids = np.array(set_ids, dtype="int32")
    if "GENERATE" in params_map:
        if len(set_ids) != 3:
            raise ReadError(set_ids)
        # inclusive range: first, last, stride
        set_ids = np.arange(set_ids[0], set_ids[1] + 1, set_ids[2], dtype="int32")
    return set_ids, set_names, line
def write(
    filename, mesh: Mesh, float_fmt: str = ".16e", translate_cell_names: bool = True
) -> None:
    """Write a mesh to an Abaqus .inp file.

    Node and element ids are written 1-based. Cell sets become *ELSET
    blocks (element ids offset per preceding cell block), point sets
    become *NSET blocks. When translate_cell_names is False the meshio
    type names are written verbatim as the element TYPE.
    """
    with open_file(filename, "wt") as f:
        f.write("*HEADING\n")
        f.write("Abaqus DataFile Version 6.14\n")
        f.write(f"written by meshio v{__version__}\n")
        f.write("*NODE\n")
        fmt = ", ".join(["{}"] + ["{:" + float_fmt + "}"] * mesh.points.shape[1]) + "\n"
        for k, x in enumerate(mesh.points):
            f.write(fmt.format(k + 1, *x))
        eid = 0
        for cell_block in mesh.cells:
            cell_type = cell_block.type
            node_idcs = cell_block.data
            name = (
                meshio_to_abaqus_type[cell_type] if translate_cell_names else cell_type
            )
            f.write(f"*ELEMENT, TYPE={name}\n")
            for row in node_idcs:
                eid += 1
                nids_strs = (str(nid + 1) for nid in row.tolist())
                f.write(str(eid) + "," + ",".join(nids_strs) + "\n")

        # write at most nnl set ids per line
        nnl = 8
        offset = 0
        for ic in range(len(mesh.cells)):
            for k, v in mesh.cell_sets.items():
                if len(v[ic]) > 0:
                    els = [str(i + 1 + offset) for i in v[ic]]
                    f.write(f"*ELSET, ELSET={k}\n")
                    f.write(
                        ",\n".join(
                            ",".join(els[i : i + nnl]) for i in range(0, len(els), nnl)
                        )
                        + "\n"
                    )
            offset += len(mesh.cells[ic].data)

        for k, v in mesh.point_sets.items():
            nds = [str(i + 1) for i in v]
            f.write(f"*NSET, NSET={k}\n")
            f.write(
                ",\n".join(",".join(nds[i : i + nnl]) for i in range(0, len(nds), nnl))
                + "\n"
            )
        # https://github.com/nschloe/meshio/issues/747#issuecomment-643479921
        # f.write("*END")
# Register the Abaqus reader/writer with meshio's format registry.
register_format("abaqus", [".inp"], read, {"abaqus": write})
src/meshio/ansys/ 0000775 0000000 0000000 00000000000 14562440725 0014277 5 ustar 00root root 0000000 0000000 src/meshio/ansys/__init__.py 0000664 0000000 0000000 00000000075 14562440725 0016412 0 ustar 00root root 0000000 0000000 from ._ansys import read, write
__all__ = ["read", "write"]
src/meshio/ansys/_ansys.py 0000664 0000000 0000000 00000036005 14562440725 0016151 0 ustar 00root root 0000000 0000000 """
I/O for Ansys's msh format.
"""
import re
import numpy as np
from ..__about__ import __version__
from .._common import warn
from .._exceptions import ReadError, WriteError
from .._files import open_file
from .._helpers import register_format
from .._mesh import Mesh
def _skip_to(f, char):
    """Consume the stream one byte at a time until `char` has been read."""
    while f.read(1).decode() != char:
        pass
def _skip_close(f, num_open_brackets):
    """Consume the stream until all currently open '(' brackets are closed."""
    depth = num_open_brackets
    while depth > 0:
        c = f.read(1).decode()
        if c == "(":
            depth += 1
        elif c == ")":
            depth -= 1
def _read_points(f, line, first_point_index_overall, last_point_index):
    """Read one point section (index 10 ASCII, 2010/3010 binary).

    Returns (points, first_point_index_overall, last_point_index); points
    is None when the line was only a declaration of the total point count.
    Header fields are hexadecimal.
    """
    # If the line is self-contained, it is merely a declaration
    # of the total number of points.
    if line.count("(") == line.count(")"):
        return None, None, None

    # (3010 (zone-id first-index last-index type ND)
    out = re.match("\\s*\\(\\s*(|20|30)10\\s*\\(([^\\)]*)\\).*", line)
    assert out is not None
    a = [int(num, 16) for num in out.group(2).split()]

    if len(a) <= 4:
        raise ReadError()
    first_point_index = a[1]
    # store the very first point index
    if first_point_index_overall is None:
        first_point_index_overall = first_point_index
    # make sure that point arrays are subsequent
    if last_point_index is not None:
        if last_point_index + 1 != first_point_index:
            raise ReadError()
    last_point_index = a[2]
    num_points = last_point_index - first_point_index + 1
    dim = a[4]

    # Skip ahead to the byte that opens the data block (might
    # be the current line already).
    last_char = line.strip()[-1]
    while last_char != "(":
        last_char = f.read(1).decode()

    if out.group(1) == "":
        # ASCII data
        pts = np.empty((num_points, dim))
        for k in range(num_points):
            # skip ahead to the first line with data
            line = ""
            while line.strip() == "":
                line = f.readline().decode()
            dat = line.split()
            if len(dat) != dim:
                raise ReadError()
            for d in range(dim):
                pts[k][d] = float(dat[d])
    else:
        # binary data: "20" -> single precision, "30" -> double precision
        if out.group(1) == "20":
            dtype = np.float32
        else:
            if out.group(1) != "30":
                # BUG FIX: the exception was constructed but never raised.
                # (Defensive only: the regex admits just "", "20", "30".)
                raise ReadError(f"Expected keys '20' or '30', got {out.group(1)}.")
            dtype = np.float64
        # read point data
        pts = np.fromfile(f, count=dim * num_points, dtype=dtype).reshape(
            (num_points, dim)
        )

    # make sure that the data set is properly closed
    _skip_close(f, 2)
    return pts, first_point_index_overall, last_point_index
def _read_cells(f, line):
    """Read one cell section (index 12 ASCII, 2012/3012 binary).

    Returns (meshio cell type, connectivity array); both are None when the
    line is only a declaration, the zone is dead, or the zone is of mixed
    element type (not supported). Header fields are hexadecimal.
    """
    # If the line is self-contained, it is merely a declaration of the total number of
    # points.
    if line.count("(") == line.count(")"):
        return None, None

    out = re.match("\\s*\\(\\s*(|20|30)12\\s*\\(([^\\)]+)\\).*", line)
    assert out is not None
    a = [int(num, 16) for num in out.group(2).split()]

    if len(a) <= 4:
        raise ReadError()
    first_index = a[1]
    last_index = a[2]
    num_cells = last_index - first_index + 1
    zone_type = a[3]
    element_type = a[4]

    if zone_type == 0:
        # dead zone
        return None, None

    key, num_nodes_per_cell = {
        0: ("mixed", None),
        1: ("triangle", 3),
        2: ("tetra", 4),
        3: ("quad", 4),
        4: ("hexahedron", 8),
        5: ("pyramid", 5),
        6: ("wedge", 6),
    }[element_type]

    # Skip to the opening `(` and make sure that there's no non-whitespace character
    # between the last closing bracket and the `(`.
    if line.strip()[-1] != "(":
        c = None
        while True:
            c = f.read(1).decode()
            if c == "(":
                break
            if not re.match("\\s", c):
                # Found a non-whitespace character before `(`.
                # Assume this is just a declaration line then and
                # skip to the closing bracket.
                _skip_to(f, ")")
                return None, None

    if key == "mixed":
        # If a zone is of mixed type (element-type=0), it will have a body
        # that lists the element type of each cell. No idea where the
        # information other than the element types is stored though. Skip
        # for now.
        data = None
    else:
        # read cell data
        if out.group(1) == "":
            # ASCII cells
            data = np.empty((num_cells, num_nodes_per_cell), dtype=int)
            for k in range(num_cells):
                line = f.readline().decode()
                dat = line.split()
                if len(dat) != num_nodes_per_cell:
                    raise ReadError()
                data[k] = [int(d, 16) for d in dat]
        else:
            if key == "mixed":
                raise ReadError("Cannot read mixed cells in binary mode yet")
            # binary cells: "20" -> int32, "30" -> int64
            if out.group(1) == "20":
                dtype = np.int32
            else:
                if out.group(1) != "30":
                    # BUG FIX: the exception was constructed but never raised.
                    # (Defensive only: the regex admits just "", "20", "30".)
                    raise ReadError(f"Expected keys '20' or '30', got {out.group(1)}.")
                dtype = np.int64
            shape = (num_cells, num_nodes_per_cell)
            count = shape[0] * shape[1]
            data = np.fromfile(f, count=count, dtype=dtype).reshape(shape)

    # make sure that the data set is properly closed
    _skip_close(f, 2)
    return key, data
def _read_faces(f, line):
    """Read one face section (index 13 ASCII, 2013/3013 binary).

    Returns {meshio cell type: connectivity array}; the adjacent-cell
    columns are dropped. An empty dict is returned when the line is only a
    declaration. Header fields are hexadecimal.
    """
    # faces
    # (13 (zone-id first-index last-index type element-type))

    # If the line is self-contained, it is merely a declaration of
    # the total number of points.
    if line.count("(") == line.count(")"):
        return {}

    out = re.match("\\s*\\(\\s*(|20|30)13\\s*\\(([^\\)]+)\\).*", line)
    assert out is not None
    a = [int(num, 16) for num in out.group(2).split()]

    if len(a) <= 4:
        raise ReadError()
    first_index = a[1]
    last_index = a[2]
    num_cells = last_index - first_index + 1
    element_type = a[4]

    element_type_to_key_num_nodes = {
        0: ("mixed", None),
        2: ("line", 2),
        3: ("triangle", 3),
        4: ("quad", 4),
    }

    key, num_nodes_per_cell = element_type_to_key_num_nodes[element_type]

    # Skip ahead to the line that opens the data block (might be
    # the current line already).
    if line.strip()[-1] != "(":
        _skip_to(f, "(")

    data = {}
    if out.group(1) == "":
        # ASCII
        if key == "mixed":
            # If the face zone is of mixed type (element-type = 0), each
            # body line includes the face type and appears as follows:
            #
            #   type v0 v1 v2 c0 c1
            #
            for k in range(num_cells):
                line = ""
                while line.strip() == "":
                    line = f.readline().decode()
                dat = line.split()
                type_index = int(dat[0], 16)
                if type_index == 0:
                    raise ReadError()
                type_string, num_nodes_per_cell = element_type_to_key_num_nodes[
                    type_index
                ]
                if len(dat) != num_nodes_per_cell + 3:
                    raise ReadError()

                if type_string not in data:
                    data[type_string] = []

                data[type_string].append(
                    [int(d, 16) for d in dat[1 : num_nodes_per_cell + 1]]
                )
            data = {key: np.array(data[key]) for key in data}
        else:
            # read cell data
            data = np.empty((num_cells, num_nodes_per_cell), dtype=int)
            for k in range(num_cells):
                line = f.readline().decode()
                dat = line.split()
                # The body of a regular face section contains the grid connectivity, and
                # each line appears as follows:
                #   n0 n1 n2 cr cl
                # where n* are the defining nodes (vertices) of the face, and c* are the
                # adjacent cells.
                if len(dat) != num_nodes_per_cell + 2:
                    raise ReadError()
                data[k] = [int(d, 16) for d in dat[:num_nodes_per_cell]]
            data = {key: data}
    else:
        # binary: "20" -> int32, "30" -> int64
        if out.group(1) == "20":
            dtype = np.int32
        else:
            if out.group(1) != "30":
                # BUG FIX: the exception was constructed but never raised.
                # (Defensive only: the regex admits just "", "20", "30".)
                raise ReadError(f"Expected keys '20' or '30', got {out.group(1)}.")
            dtype = np.int64

        if key == "mixed":
            raise ReadError("Mixed element type for binary faces not supported yet")

        # Read cell data.
        # The body of a regular face section contains the grid
        # connectivity, and each line appears as follows:
        #   n0 n1 n2 cr cl
        # where n* are the defining nodes (vertices) of the face,
        # and c* are the adjacent cells.
        shape = (num_cells, num_nodes_per_cell + 2)
        count = shape[0] * shape[1]
        data = np.fromfile(f, count=count, dtype=dtype).reshape(shape)
        # Cut off the adjacent cell data.
        data = data[:, :num_nodes_per_cell]
        data = {key: data}

    # make sure that the data set is properly closed
    _skip_close(f, 2)
    return data
def read(filename):  # noqa: C901
    """Read an ANSYS/Fluent .msh file (mixed ASCII/binary sections).

    Dispatches on the section index: 10/2010/3010 points, 12/2012/3012
    cells, 13/2013/3013 faces; headers, comments, dimensionality and zone
    specifications are skipped (with a warning where unsupported).
    """
    # Initialize the data optional data fields
    field_data = {}
    cell_data = {}
    point_data = {}

    points = []
    cells = []

    first_point_index_overall = None
    last_point_index = None

    # read file in binary mode since some data might be binary
    with open_file(filename, "rb") as f:
        while True:
            line = f.readline().decode()
            if not line:
                break

            if line.strip() == "":
                continue

            # expect the line to have the form
            #  ( [...]
            out = re.match("\\s*\\(\\s*([0-9]+).*", line)
            if not out:
                raise ReadError()
            index = out.group(1)

            if index == "0":
                # Comment.
                _skip_close(f, line.count("(") - line.count(")"))
            elif index == "1":
                # header
                # (1 "")
                _skip_close(f, line.count("(") - line.count(")"))
            elif index == "2":
                # dimensionality
                # (2 3)
                _skip_close(f, line.count("(") - line.count(")"))
            elif re.match("(|20|30)10", index):
                # points
                pts, first_point_index_overall, last_point_index = _read_points(
                    f, line, first_point_index_overall, last_point_index
                )

                if pts is not None:
                    points.append(pts)
            elif re.match("(|20|30)12", index):
                # cells
                # (2012 (zone-id first-index last-index type element-type))
                key, data = _read_cells(f, line)
                if data is not None:
                    cells.append((key, data))
            elif re.match("(|20|30)13", index):
                # faces (may contain several cell types)
                data = _read_faces(f, line)

                for key in data:
                    cells.append((key, data[key]))
            elif index == "39":
                warn("Zone specification not supported yet. Skipping.")
                _skip_close(f, line.count("(") - line.count(")"))
            elif index == "45":
                # (45 (2 fluid solid)())
                obj = re.match("\\(45 \\([0-9]+ ([\\S]+) ([\\S]+)\\)\\(\\)\\)", line)
                if obj:
                    warn(
                        f"Zone specification not supported yet ({obj.group(1)}, {obj.group(2)}). "
                        + "Skipping.",
                    )
                else:
                    warn("Zone specification not supported yet.")
            else:
                warn(f"Unknown index {index}. Skipping.")
                # Skipping ahead to the next line with two closing brackets.
                _skip_close(f, line.count("(") - line.count(")"))

    points = np.concatenate(points)

    # Gauge the cells with the first point_index.
    for k, c in enumerate(cells):
        cells[k] = (c[0], c[1] - first_point_index_overall)

    return Mesh(
        points, cells, point_data=point_data, cell_data=cell_data, field_data=field_data
    )
def write(filename, mesh, binary=True):
    """Write an ANSYS Fluent .msh file.

    All index ranges inside the file are hexadecimal, hence the ``:x``
    format specifiers below.

    Parameters:
        filename: output path.
        mesh: meshio Mesh; points must have dimension 2 or 3.
        binary: if True, node/cell payloads are written as raw binary
            sections (indices 3010/2012/3012), otherwise as ASCII.

    Raises:
        WriteError: for point dimensions other than 2 or 3.
        KeyError: for cell types without an ANSYS equivalent.
    """
    with open_file(filename, "wb") as fh:
        # header
        fh.write(f'(1 "meshio {__version__}")\n'.encode())
        # dimension
        num_points, dim = mesh.points.shape
        if dim not in [2, 3]:
            raise WriteError(f"Can only write dimension 2, 3, got {dim}.")
        fh.write((f"(2 {dim})\n").encode())
        # total number of nodes
        first_node_index = 1
        fh.write((f"(10 (0 {first_node_index:x} {num_points:x} 0))\n").encode())
        # total number of cells
        total_num_cells = sum(len(c) for c in mesh.cells)
        fh.write((f"(12 (0 1 {total_num_cells:x} 0))\n").encode())
        # Write nodes
        # Section index 3010 marks a binary node section, 10 an ASCII one.
        key = "3010" if binary else "10"
        fh.write(
            f"({key} (1 {first_node_index:x} {num_points:x} 1 {dim:x})(\n".encode()
        )
        if binary:
            mesh.points.tofile(fh)
            fh.write(b"\n)")
            fh.write(b"End of Binary Section 3010)\n")
        else:
            np.savetxt(fh, mesh.points, fmt="%.16e")
            fh.write(b"))\n")
        # Write cells
        meshio_to_ansys_type = {
            # "mixed": 0,
            "triangle": 1,
            "tetra": 2,
            "quad": 3,
            "hexahedron": 4,
            "pyramid": 5,
            "wedge": 6,
            # "polyhedral": 7,
        }
        first_index = 0
        # The binary section index encodes the integer width of the data.
        binary_dtypes = {
            # np.int16 is not allowed
            np.dtype("int32"): "2012",
            np.dtype("int64"): "3012",
        }
        for cell_block in mesh.cells:
            cell_type = cell_block.type
            values = cell_block.data
            key = binary_dtypes[values.dtype] if binary else "12"
            last_index = first_index + len(values) - 1
            try:
                ansys_cell_type = meshio_to_ansys_type[cell_type]
            except KeyError:
                legal_keys = ", ".join(meshio_to_ansys_type.keys())
                raise KeyError(
                    f"Illegal ANSYS cell type '{cell_type}'. (legal: {legal_keys})"
                )
            fh.write(
                f"({key} (1 {first_index:x} {last_index:x} 1 {ansys_cell_type})(\n".encode()
            )
            if binary:
                # Node indices in the file are 1-based.
                (values + first_node_index).tofile(fh)
                fh.write(b"\n)")
                fh.write((f"End of Binary Section {key})\n").encode())
            else:
                np.savetxt(fh, values + first_node_index, fmt="%x")
                fh.write(b"))\n")
            first_index = last_index + 1
register_format("ansys", [".msh"], read, {"ansys": write})
src/meshio/avsucd/ 0000775 0000000 0000000 00000000000 14562440725 0014427 5 ustar 00root root 0000000 0000000 src/meshio/avsucd/__init__.py 0000664 0000000 0000000 00000000076 14562440725 0016543 0 ustar 00root root 0000000 0000000 from ._avsucd import read, write
__all__ = ["read", "write"]
src/meshio/avsucd/_avsucd.py 0000664 0000000 0000000 00000016326 14562440725 0016435 0 ustar 00root root 0000000 0000000 """
I/O for AVS-UCD format, cf.
.
"""
import numpy as np
from ..__about__ import __version__ as version
from .._common import _pick_first_int_data, warn
from .._files import open_file
from .._helpers import register_format
from .._mesh import CellBlock, Mesh
# Map from meshio cell types to AVS-UCD element keywords.
meshio_to_avsucd_type = {
    "vertex": "pt",
    "line": "line",
    "triangle": "tri",
    "quad": "quad",
    "tetra": "tet",
    "pyramid": "pyr",
    "wedge": "prism",
    "hexahedron": "hex",
}
# Inverse map: AVS-UCD element keyword -> meshio cell type.
avsucd_to_meshio_type = {v: k for k, v in meshio_to_avsucd_type.items()}
# Node permutation applied when writing meshio cells in AVS-UCD order.
meshio_to_avsucd_order = {
    "vertex": [0],
    "line": [0, 1],
    "triangle": [0, 1, 2],
    "quad": [0, 1, 2, 3],
    "tetra": [0, 1, 3, 2],
    "pyramid": [4, 0, 1, 2, 3],
    "wedge": [3, 4, 5, 0, 1, 2],
    "hexahedron": [4, 5, 6, 7, 0, 1, 2, 3],
}
# Inverse permutations for reading. All permutations above are their own
# inverse except the pyramid's, which needs an explicit inverse.
avsucd_to_meshio_order = {
    k: (v if k != "pyramid" else [1, 2, 3, 4, 0])
    for k, v in meshio_to_avsucd_order.items()
}
def read(filename):
    """Read an AVS-UCD mesh from ``filename`` (path or file-like object)."""
    with open_file(filename, "r") as handle:
        mesh = read_buffer(handle)
    return mesh
def read_buffer(f):
    """Read an AVS-UCD mesh from an open text-file handle."""
    # First non-comment line: node, cell, node-data, cell-data, model counts.
    counts = np.genfromtxt(f, max_rows=1, dtype=int, comments="#")
    num_nodes, num_cells, num_node_data, num_cell_data, _ = counts
    # Nodes, then cells (which also yields the material cell data).
    point_ids, points = _read_nodes(f, num_nodes)
    cell_ids, cells, cell_data = _read_cells(f, num_cells, point_ids)
    # Optional node data.
    point_data = _read_data(f, num_nodes, point_ids) if num_node_data else {}
    # Optional cell data: split the flat per-cell arrays back into the
    # per-cell-block chunks meshio expects.
    if num_cell_data:
        raw = _read_data(f, num_cells, cell_ids)
        offsets = np.cumsum([len(block[1]) for block in cells[:-1]])
        for name, values in raw.items():
            cell_data[name] = np.split(values, offsets)
    return Mesh(points, cells, point_data=point_data, cell_data=cell_data)
def _read_nodes(f, num_nodes):
if num_nodes > 0:
data = np.genfromtxt(f, max_rows=num_nodes)
else:
data = np.empty((0, 3))
points_ids = {int(pid): i for i, pid in enumerate(data[:, 0])}
return points_ids, data[:, 1:]
def _read_cells(f, num_cells, point_ids):
    """Read cell connectivity, grouping consecutive cells of equal type.

    Returns (cell_ids, cell_blocks, cell_data) where cell_data holds the
    per-block material arrays under "avsucd:material".
    """
    blocks = []
    cell_ids = {}
    materials = []
    for running_id in range(num_cells):
        tokens = f.readline().strip().split()
        file_id = int(tokens[0])
        material = int(tokens[1])
        ctype = avsucd_to_meshio_type[tokens[2]]
        connectivity = [point_ids[int(pid)] for pid in tokens[3:]]
        # Extend the current block if the type matches, else open a new one.
        if blocks and blocks[-1][0] == ctype:
            blocks[-1][1].append(connectivity)
            materials[-1].append(material)
        else:
            blocks.append((ctype, [connectivity]))
            materials.append([material])
        cell_ids[file_id] = running_id
    # Convert to numpy arrays, reordering nodes into meshio convention.
    for k, (ctype, conn) in enumerate(blocks):
        blocks[k] = CellBlock(ctype, np.array(conn)[:, avsucd_to_meshio_order[ctype]])
        materials[k] = np.array(materials[k])
    cell_data = {"avsucd:material": materials}
    return cell_ids, blocks, cell_data
def _read_data(f, num_entities, entity_ids):
line = f.readline().strip().split()
data_size = [int(i) for i in line[1:]]
labels = {}
data = {}
for i, dsize in enumerate(data_size):
line = f.readline().strip().split(",")
labels[i] = line[0].strip().replace(" ", "_")
data[labels[i]] = (
np.empty(num_entities) if dsize == 1 else np.empty((num_entities, dsize))
)
for _ in range(num_entities):
line = f.readline().strip().split()
eid = entity_ids[int(line[0])]
j = 0
for i, dsize in enumerate(data_size):
if dsize == 1:
data[labels[i]][eid] = float(line[j + 1])
else:
data[labels[i]][eid] = [
float(val) for val in line[j + 1 : j + 1 + dsize]
]
j += dsize
return data
def write(filename, mesh):
    """Write mesh in AVS-UCD ASCII format.

    Notes:
        - 2D points are padded with a zero third component; note that this
          mutates ``mesh.points`` in place.
        - At most one integer cell-data array is stored as the material
          column; remaining cell-data arrays are written as generic data.
    """
    if len(mesh.points.shape) > 1 and mesh.points.shape[1] == 2:
        warn(
            "AVS-UCD requires 3D points, but 2D points given. "
            "Appending 0 third component."
        )
        mesh.points = np.column_stack([mesh.points, np.zeros_like(mesh.points[:, 0])])
    with open_file(filename, "w") as f:
        # Write meshio version
        f.write(f"# Written by meshio v{version}\n")
        # Write first line
        num_nodes = len(mesh.points)
        num_cells = sum(len(c.data) for c in mesh.cells)
        # Try to find an appropriate materials array
        key, other = _pick_first_int_data(mesh.cell_data)
        if key and other:
            other_string = ", ".join(other)
            warn(
                "AVS-UCD can only write one cell data array. "
                f"Picking {key}, skipping {other_string}."
            )
        material = (
            np.concatenate(mesh.cell_data[key])
            if key
            else np.zeros(num_cells, dtype=int)
        )
        # Number of components per node- and per cell-data array (scalar
        # arrays count as one component).
        num_node_data = [
            1 if v.ndim == 1 else v.shape[1] for v in mesh.point_data.values()
        ]
        num_cell_data = [
            1 if np.concatenate(v).ndim == 1 else np.concatenate(v).shape[1]
            for k, v in mesh.cell_data.items()
            if k != key
        ]
        num_node_data_sum = sum(num_node_data)
        num_cell_data_sum = sum(num_cell_data)
        f.write(f"{num_nodes} {num_cells} {num_node_data_sum} {num_cell_data_sum} 0\n")
        # Write nodes
        _write_nodes(f, mesh.points)
        # Write cells
        _write_cells(f, mesh.cells, material)
        # Write node data
        if num_node_data_sum:
            labels = mesh.point_data.keys()
            data_array = np.column_stack([v for v in mesh.point_data.values()])
            _write_data(
                f, labels, data_array, num_nodes, num_node_data, num_node_data_sum
            )
        # Write cell data (the material array itself is excluded)
        if num_cell_data_sum:
            labels = [k for k in mesh.cell_data.keys() if k != key]
            data_array = np.column_stack(
                [np.concatenate(v) for k, v in mesh.cell_data.items() if k != key]
            )
            _write_data(
                f, labels, data_array, num_cells, num_cell_data, num_cell_data_sum
            )
def _write_nodes(f, points):
for i, (x, y, z) in enumerate(points):
f.write(f"{i + 1} {x} {y} {z}\n")
def _write_cells(f, cells, material):
    """Write cells as "<id> <material> <avsucd type> <node ids...>" lines.

    Cell IDs and node IDs are 1-based; node order is permuted into the
    AVS-UCD convention.
    """
    idx = 0
    for block in cells:
        permutation = meshio_to_avsucd_order[block.type]
        type_name = meshio_to_avsucd_type[block.type]
        for row in block.data[:, permutation]:
            node_str = " ".join(str(node) for node in row + 1)
            f.write(f"{idx + 1} {material[idx]} {type_name} {node_str}\n")
            idx += 1
def _write_data(f, labels, data_array, num_entities, num_data, num_data_sum):
num_data_str = " ".join(str(i) for i in num_data)
f.write(f"{len(num_data)} {num_data_str}\n")
for label in labels:
f.write(f"{label}, real\n")
data_array = np.column_stack((np.arange(1, num_entities + 1), data_array))
np.savetxt(f, data_array, delimiter=" ", fmt=["%d"] + ["%.14e"] * num_data_sum)
register_format("avsucd", [".avs"], read, {"avsucd": write})
src/meshio/cgns/ 0000775 0000000 0000000 00000000000 14562440725 0014074 5 ustar 00root root 0000000 0000000 src/meshio/cgns/__init__.py 0000664 0000000 0000000 00000000074 14562440725 0016206 0 ustar 00root root 0000000 0000000 from ._cgns import read, write
__all__ = ["read", "write"]
src/meshio/cgns/_cgns.py 0000664 0000000 0000000 00000005606 14562440725 0015546 0 ustar 00root root 0000000 0000000 """
CGNS
TODO link to specification?
"""
import numpy as np
from .._exceptions import ReadError
from .._helpers import register_format
from .._mesh import Mesh
def read(filename):
    """Read a CGNS (HDF5 flavor) file; only a single tetrahedral zone.

    Raises:
        ReadError: if the expected Base/Zone1 structure is missing or the
            elements are not tetrahedra.
    """
    import h5py

    # Context manager so the HDF5 handle is closed even on error
    # (the file handle was previously leaked).
    with h5py.File(filename, "r") as f:
        if "Base" not in f:
            raise ReadError('Expected "Base" in file. Malformed CGNS?')
        if "Zone1" not in f["Base"]:
            raise ReadError('Expected "Zone1" in "Base". Malformed CGNS?')
        zone = f["Base"]["Zone1"]
        coords = zone["GridCoordinates"]
        points = np.column_stack(
            [
                coords["CoordinateX"][" data"],
                coords["CoordinateY"][" data"],
                coords["CoordinateZ"][" data"],
            ]
        )
        elems = zone["GridElements"]
        # ElementRange holds the 1-based [first, last] element indices.
        _, idx_max = elems["ElementRange"][" data"]
        # CGNS connectivity is 1-based; meshio is 0-based.
        cells = np.array(elems["ElementConnectivity"][" data"]).reshape(idx_max, -1) - 1
    # TODO how to distinguish cell types?
    if cells.shape[1] != 4:
        raise ReadError("Can only read tetrahedra.")
    return Mesh(points, [("tetra", cells)])
def write(filename, mesh, compression="gzip", compression_opts=4):
    """Write a CGNS (HDF5 flavor) file. Only tetrahedral cells are written.

    Parameters:
        filename: output path.
        mesh: meshio Mesh with 3-D points.
        compression, compression_opts: forwarded to h5py ``create_dataset``.
    """
    import h5py

    def _dataset(group, data):
        # Every CGNS payload lives in a dataset named " data".
        group.create_dataset(
            " data",
            data=data,
            compression=compression,
            compression_opts=compression_opts,
        )

    # Collect all tetra blocks up front. Writing one " data" dataset per
    # block, as before, raised on the second block (duplicate dataset name);
    # concatenating them writes a single valid connectivity table.
    tetra_blocks = [cb.data for cb in mesh.cells if cb.type == "tetra"]
    # TODO write cells other than tetra
    # Context manager so the file is flushed and closed even on error.
    with h5py.File(filename, "w") as f:
        base = f.create_group("Base")
        # TODO something is missing here
        zone1 = base.create_group("Zone1")
        # write points
        coords = zone1.create_group("GridCoordinates")
        _dataset(coords.create_group("CoordinateX"), mesh.points[:, 0])
        _dataset(coords.create_group("CoordinateY"), mesh.points[:, 1])
        _dataset(coords.create_group("CoordinateZ"), mesh.points[:, 2])
        # write cells
        elems = zone1.create_group("GridElements")
        rnge = elems.create_group("ElementRange")
        conn = elems.create_group("ElementConnectivity")
        if tetra_blocks:
            cells = np.concatenate(tetra_blocks)
            # CGNS element ranges and connectivity are 1-based.
            _dataset(rnge, [1, cells.shape[0]])
            _dataset(conn, cells.reshape(-1) + 1)
src/meshio/dolfin/ 0000775 0000000 0000000 00000000000 14562440725 0014415 5 ustar 00root root 0000000 0000000 src/meshio/dolfin/__init__.py 0000664 0000000 0000000 00000000076 14562440725 0016531 0 ustar 00root root 0000000 0000000 from ._dolfin import read, write
__all__ = ["read", "write"]
src/meshio/dolfin/_dolfin.py 0000664 0000000 0000000 00000017653 14562440725 0016415 0 ustar 00root root 0000000 0000000 """
I/O for DOLFIN's XML format, cf.
.
"""
import os
import pathlib
import re
from xml.etree import ElementTree as ET
import numpy as np
from .._common import warn
from .._exceptions import ReadError, WriteError
from .._helpers import register_format
from .._mesh import Mesh
def _read_mesh(filename):
    """Incrementally parse a DOLFIN XML mesh file.

    Returns:
        (points, cells, cell_type): coordinates, a one-element cell list,
        and the meshio cell type ("triangle" or "tetra").

    Raises:
        ReadError: if elements appear out of the expected order.
    """
    dolfin_to_meshio_type = {"triangle": ("triangle", 3), "tetrahedron": ("tetra", 4)}
    # Use iterparse() to avoid loading the entire file via parse(). iterparse()
    # allows to discard elements (via clear()) after they have been processed.
    # See the xml.etree.ElementTree documentation on iterparse().
    dim = None
    points = None
    keys = None
    cell_type = None
    num_nodes_per_cell = None
    cells = None
    cell_tags = None
    for event, elem in ET.iterparse(filename, events=("start", "end")):
        # All attributes are available at the "start" event already.
        if event == "end":
            continue
        if elem.tag == "dolfin":
            # Don't be too strict with the assertion. Some mesh files don't have the
            # proper tags.
            # assert elem.attrib['nsmap'] \
            #     == '{\'dolfin\': \'https://fenicsproject.org/\'}'
            pass
        elif elem.tag == "mesh":
            dim = int(elem.attrib["dim"])
            cell_type, num_nodes_per_cell = dolfin_to_meshio_type[
                elem.attrib["celltype"]
            ]
            # Attribute names of a cell's nodes: v0, v1, ...
            cell_tags = [f"v{i}" for i in range(num_nodes_per_cell)]
        elif elem.tag == "vertices":
            if dim is None:
                raise ReadError("Expected `mesh` before `vertices`")
            points = np.empty((int(elem.attrib["size"]), dim))
            keys = ["x", "y"]
            if dim == 3:
                keys += ["z"]
        elif elem.tag == "vertex":
            if points is None or keys is None:
                raise ReadError("Expected `vertices` before `vertex`")
            k = int(elem.attrib["index"])
            points[k] = [elem.attrib[key] for key in keys]
        elif elem.tag == "cells":
            if cell_type is None or num_nodes_per_cell is None:
                raise ReadError("Expected `mesh` before `cells`")
            cells = [
                (
                    cell_type,
                    np.empty((int(elem.attrib["size"]), num_nodes_per_cell), dtype=int),
                )
            ]
        elif elem.tag in ["triangle", "tetrahedron"]:
            k = int(elem.attrib["index"])
            assert cells is not None
            assert cell_tags is not None
            cells[0][1][k] = [elem.attrib[t] for t in cell_tags]
        else:
            warn(f"Unknown entry {elem.tag}. Ignoring.")
        # Free the element right away; only its start attributes are needed.
        elem.clear()
    return points, cells, cell_type
def _read_cell_data(filename):
dolfin_type_to_numpy_type = {
"int": np.dtype("int"),
"float": np.dtype("float"),
"uint": np.dtype("uint"),
}
cell_data = {}
dir_name = pathlib.Path(filename).resolve().parent
# Loop over all files in the same directory as `filename`.
basename = pathlib.Path(filename).stem
for f in os.listdir(dir_name):
# Check if there are files by the name "_*.xml"; if yes,
# extract the * pattern and make it the name of the data set.
out = re.match(f"{basename}_([^\\.]+)\\.xml", f)
if not out:
continue
name = out.group(1)
parser = ET.XMLParser()
tree = ET.parse((dir_name / f).as_posix(), parser)
root = tree.getroot()
mesh_functions = list(root)
if len(mesh_functions) != 1:
raise ReadError("Can only handle one mesh function")
mesh_function = mesh_functions[0]
if mesh_function.tag != "mesh_function":
raise ReadError()
size = int(mesh_function.attrib["size"])
dtype = dolfin_type_to_numpy_type[mesh_function.attrib["type"]]
data = np.empty(size, dtype=dtype)
for child in mesh_function:
if child.tag != "entity":
raise ReadError()
idx = int(child.attrib["index"])
data[idx] = child.attrib["value"]
if name not in cell_data:
cell_data[name] = []
cell_data[name].append(data)
return cell_data
def read(filename):
    """Read a DOLFIN XML mesh together with any companion cell-data files."""
    points, cells, _ = _read_mesh(filename)
    return Mesh(points, cells, cell_data=_read_cell_data(filename))
def _write_mesh(filename, points, cell_type, cells):
stripped_cells = [c for c in cells if c.type == cell_type]
meshio_to_dolfin_type = {"triangle": "triangle", "tetra": "tetrahedron"}
if any(c.type != cell_type for c in cells):
discarded_cell_types = {c.type for c in cells if c.type != cell_type}
warn(
"DOLFIN XML can only handle one cell type at a time. "
+ f"Using {cell_type}, discarding {', '.join(discarded_cell_types)}.",
)
dim = points.shape[1]
if dim not in [2, 3]:
raise WriteError(f"Can only write dimension 2, 3, got {dim}.")
coord_names = ["x", "y"]
if dim == 3:
coord_names += ["z"]
with open(filename, "w") as f:
f.write("\n")
ct = meshio_to_dolfin_type[cell_type]
f.write(f' \n')
num_points = len(points)
f.write(f' \n')
for idx, point in enumerate(points):
s = " ".join(f'{xyz}="{p}"' for xyz, p in zip("xyz", point))
f.write(f' \n')
f.write(" \n")
num_cells = 0
for c in stripped_cells:
num_cells += len(c.data)
f.write(f' \n')
idx = 0
for cell_block in stripped_cells:
type_string = meshio_to_dolfin_type[cell_block.type]
for cell in cell_block.data:
s = " ".join(f'v{k}="{c}"' for k, c in enumerate(cell))
f.write(f' <{type_string} index="{idx}" {s} />\n')
idx += 1
f.write(" \n")
f.write(" \n")
f.write("")
def _numpy_type_to_dolfin_type(dtype):
types = {
"int": [np.int8, np.int16, np.int32, np.int64],
"uint": [np.uint8, np.uint16, np.uint32, np.uint64],
"float": [np.float16, np.float32, np.float64],
}
for key, numpy_types in types.items():
for numpy_type in numpy_types:
if np.issubdtype(dtype, numpy_type):
return key
raise WriteError("Could not convert NumPy data type to DOLFIN data type.")
def _write_cell_data(filename, dim, cell_data):
    """Write one cell-data array as a DOLFIN XML ``mesh_function`` file.

    Parameters:
        filename: output path of the companion XML file.
        dim: topological dimension recorded in the mesh_function element.
        cell_data: 1-D numpy array of per-cell values.
    """
    dolfin = ET.Element("dolfin", nsmap={"dolfin": "https://fenicsproject.org/"})
    mesh_function = ET.SubElement(
        dolfin,
        "mesh_function",
        type=_numpy_type_to_dolfin_type(cell_data.dtype),
        dim=str(dim),
        size=str(len(cell_data)),
    )
    for k, value in enumerate(cell_data):
        # Use str(), not repr(): with NumPy >= 2.0, repr() of a scalar is
        # e.g. "np.int64(3)", which the reader cannot parse back into data.
        ET.SubElement(mesh_function, "entity", index=str(k), value=str(value))
    tree = ET.ElementTree(dolfin)
    tree.write(filename)
def write(filename, mesh):
    """Write mesh in legacy DOLFIN XML; cell data go to companion files."""
    warn("DOLFIN XML is a legacy format. Consider using XDMF instead.")
    present_types = [c.type for c in mesh.cells]
    if "tetra" in present_types:
        cell_type = "tetra"
    elif "triangle" in present_types:
        cell_type = "triangle"
    else:
        raise WriteError(
            "DOLFIN XML only supports triangles and tetrahedra. "
            "Consider using XDMF instead."
        )
    _write_mesh(filename, mesh.points, cell_type, mesh.cells)
    base = os.path.splitext(filename)[0]
    for name, lst in mesh.cell_data.items():
        for data in lst:
            # A mesh with all-zero z coordinates counts as two-dimensional.
            is_2d = mesh.points.shape[1] == 2 or all(mesh.points[:, 2] == 0)
            _write_cell_data(f"{base}_{name}.xml", 2 if is_2d else 3, np.array(data))
register_format("dolfin-xml", [".xml"], read, {"dolfin-xml": write})
src/meshio/exodus/ 0000775 0000000 0000000 00000000000 14562440725 0014451 5 ustar 00root root 0000000 0000000 src/meshio/exodus/__init__.py 0000664 0000000 0000000 00000000076 14562440725 0016565 0 ustar 00root root 0000000 0000000 from ._exodus import read, write
__all__ = ["read", "write"]
src/meshio/exodus/_exodus.py 0000664 0000000 0000000 00000031421 14562440725 0016472 0 ustar 00root root 0000000 0000000 """
I/O for Exodus II.
See
,
in particular Appendix A (page 171, Implementation of EXODUS II with netCDF).
"""
import datetime
import re
import numpy as np
from ..__about__ import __version__
from .._common import warn
from .._exceptions import ReadError
from .._helpers import register_format
from .._mesh import Mesh
# Map from Exodus II element-type names (upper-cased) to meshio cell types.
exodus_to_meshio_type = {
    "SPHERE": "vertex",
    # curves
    "BEAM": "line",
    "BEAM2": "line",
    "BEAM3": "line3",
    "BAR2": "line",
    # surfaces
    "SHELL": "quad",
    "SHELL4": "quad",
    "SHELL8": "quad8",
    "SHELL9": "quad9",
    "QUAD": "quad",
    "QUAD4": "quad",
    "QUAD5": "quad5",
    "QUAD8": "quad8",
    "QUAD9": "quad9",
    #
    "TRI": "triangle",
    "TRIANGLE": "triangle",
    "TRI3": "triangle",
    "TRI6": "triangle6",
    "TRI7": "triangle7",
    # 'TRISHELL': 'triangle',
    # 'TRISHELL3': 'triangle',
    # 'TRISHELL6': 'triangle6',
    # 'TRISHELL7': 'triangle',
    #
    # volumes
    "HEX": "hexahedron",
    "HEXAHEDRON": "hexahedron",
    "HEX8": "hexahedron",
    "HEX9": "hexahedron9",
    "HEX20": "hexahedron20",
    "HEX27": "hexahedron27",
    #
    "TETRA": "tetra",
    "TETRA4": "tetra4",
    "TET4": "tetra4",
    "TETRA8": "tetra8",
    "TETRA10": "tetra10",
    "TETRA14": "tetra14",
    #
    "PYRAMID": "pyramid",
    "WEDGE": "wedge",
}
# Inverse map for writing. Note that several Exodus aliases map to the same
# meshio type, so the surviving Exodus name is the last one listed above.
meshio_to_exodus_type = {v: k for k, v in exodus_to_meshio_type.items()}
def read(filename):  # noqa: C901
    """Read an Exodus II (netCDF) mesh file.

    Only the first time step of nodal/element variables is kept; X/Y/Z and
    R/Z component variables are recombined into vector fields. Node sets
    are returned as ``point_sets``.
    """
    import netCDF4

    with netCDF4.Dataset(filename) as nc:
        # assert nc.version == np.float32(5.1)
        # assert nc.api_version == np.float32(5.1)
        # assert nc.floating_point_word_size == 8
        # assert b''.join(nc.variables['coor_names'][0]) == b'X'
        # assert b''.join(nc.variables['coor_names'][1]) == b'Y'
        # assert b''.join(nc.variables['coor_names'][2]) == b'Z'
        points = np.zeros((len(nc.dimensions["num_nodes"]), 3))
        point_data_names = []
        cell_data_names = []
        pd = {}
        cd = {}
        cells = []
        ns_names = []
        # eb_names = []
        ns = []
        point_sets = {}
        info = []
        for key, value in nc.variables.items():
            if key == "info_records":
                value.set_auto_mask(False)
                for c in value[:]:
                    try:
                        info += [b"".join(c).decode("UTF-8")]
                    except UnicodeDecodeError:
                        # https://github.com/nschloe/meshio/issues/983
                        pass
            elif key == "qa_records":
                value.set_auto_mask(False)
                for val in value:
                    info += [b"".join(c).decode("UTF-8") for c in val[:]]
            elif key[:7] == "connect":
                meshio_type = exodus_to_meshio_type[value.elem_type.upper()]
                # Exodus connectivity is 1-based; meshio is 0-based.
                cells.append((meshio_type, value[:] - 1))
            elif key == "coord":
                points = nc.variables["coord"][:].T
            elif key == "coordx":
                points[:, 0] = value[:]
            elif key == "coordy":
                points[:, 1] = value[:]
            elif key == "coordz":
                points[:, 2] = value[:]
            elif key == "name_nod_var":
                value.set_auto_mask(False)
                point_data_names = [b"".join(c).decode("UTF-8") for c in value[:]]
            elif key[:12] == "vals_nod_var":
                # Variables are named vals_nod_var, vals_nod_var1, ...
                idx = 0 if len(key) == 12 else int(key[12:]) - 1
                value.set_auto_mask(False)
                # For now only take the first value
                pd[idx] = value[0]
                if len(value) > 1:
                    warn("Skipping some time data")
            elif key == "name_elem_var":
                value.set_auto_mask(False)
                cell_data_names = [b"".join(c).decode("UTF-8") for c in value[:]]
            elif key[:13] == "vals_elem_var":
                # eb: element block
                m = re.match("vals_elem_var(\\d+)?(?:eb(\\d+))?", key)
                idx = 0 if m.group(1) is None else int(m.group(1)) - 1
                block = 0 if m.group(2) is None else int(m.group(2)) - 1
                value.set_auto_mask(False)
                # For now only take the first value
                if idx not in cd:
                    cd[idx] = {}
                cd[idx][block] = value[0]
                if len(value) > 1:
                    warn("Skipping some time data")
            elif key == "ns_names":
                value.set_auto_mask(False)
                ns_names = [b"".join(c).decode("UTF-8") for c in value[:]]
            # elif key == "eb_names":
            #     value.set_auto_mask(False)
            #     eb_names = [b"".join(c).decode("UTF-8") for c in value[:]]
            elif key.startswith("node_ns"):  # Expected keys: node_ns1, node_ns2
                ns.append(value[:] - 1)  # Exodus is 1-based
        # merge element block data; can't handle blocks yet
        for k, value in cd.items():
            cd[k] = np.concatenate(list(value.values()))
        # Check if there are any R, Z tuples or X, Y, Z
        # triplets in the point data. If yes, they belong together.
        single, double, triple = categorize(point_data_names)
        point_data = {}
        for name, idx in single:
            point_data[name] = pd[idx]
        for name, idx0, idx1 in double:
            point_data[name] = np.column_stack([pd[idx0], pd[idx1]])
        for name, idx0, idx1, idx2 in triple:
            point_data[name] = np.column_stack([pd[idx0], pd[idx1], pd[idx2]])
        # Slice the merged element data back into per-cell-block chunks.
        cell_data = {}
        k = 0
        for _, cell in cells:
            n = len(cell)
            for name, data in zip(cell_data_names, cd.values()):
                if name not in cell_data:
                    cell_data[name] = []
                cell_data[name].append(data[k : k + n])
            k += n
        point_sets = {name: dat for name, dat in zip(ns_names, ns)}
        return Mesh(
            points,
            cells,
            point_data=point_data,
            cell_data=cell_data,
            point_sets=point_sets,
            info=info,
        )
def categorize(names):
    """Group variable names into scalars, (R, Z) pairs, and (X, Y, Z) triples.

    Returns:
        (single, double, triple): ``single`` holds ``(name, idx)``,
        ``double`` holds ``(basename, ir, iz)``, and ``triple`` holds
        ``(basename, ix, iy, iz)`` index tuples into ``names``.

    Raises:
        ReadError: if some name ends up unaccounted for.
    """
    single = []
    double = []
    triple = []
    is_accounted_for = [False] * len(names)
    k = 0
    while k < len(names):
        if is_accounted_for[k]:
            k += 1
            continue
        name = names[k]
        if name[-1] == "X":
            ix = k
            try:
                iy = names.index(name[:-1] + "Y")
            except ValueError:
                iy = None
            try:
                iz = names.index(name[:-1] + "Z")
            except ValueError:
                iz = None
            # Compare against None explicitly: index 0 is a valid match but
            # falsy, so the previous `if iy and iz:` silently broke pairing
            # whenever the Y or Z component sat at position 0.
            if iy is not None and iz is not None:
                triple.append((name[:-1], ix, iy, iz))
                is_accounted_for[ix] = True
                is_accounted_for[iy] = True
                is_accounted_for[iz] = True
            else:
                single.append((name, ix))
                is_accounted_for[ix] = True
        elif name[-2:] == "_R":
            ir = k
            try:
                iz = names.index(name[:-2] + "_Z")
            except ValueError:
                iz = None
            # Same None-vs-0 fix as above.
            if iz is not None:
                double.append((name[:-2], ir, iz))
                is_accounted_for[ir] = True
                is_accounted_for[iz] = True
            else:
                single.append((name, ir))
                is_accounted_for[ir] = True
        else:
            single.append((name, k))
            is_accounted_for[k] = True
        k += 1
    if not all(is_accounted_for):
        raise ReadError()
    return single, double, triple
# Map from NumPy dtype names to the netCDF/Exodus type codes used when
# creating variables.
numpy_to_exodus_dtype = {
    "float32": "f4",
    "float64": "f8",
    "int8": "i1",
    "int16": "i2",
    "int32": "i4",
    "int64": "i8",
    "uint8": "u1",
    "uint16": "u2",
    "uint32": "u4",
    "uint64": "u8",
}
def write(filename, mesh):
    """Write an Exodus II (netCDF) file.

    Writes points, one element block per cell block, the first time step of
    all point data, and node sets built from ``mesh.point_sets``.
    """
    import netCDF4

    with netCDF4.Dataset(filename, "w") as rootgrp:
        # set global data
        now = datetime.datetime.now().isoformat()
        rootgrp.title = f"Created by meshio v{__version__}, {now}"
        rootgrp.version = np.float32(5.1)
        rootgrp.api_version = np.float32(5.1)
        rootgrp.floating_point_word_size = 8
        # set dimensions
        total_num_elems = sum(c.data.shape[0] for c in mesh.cells)
        rootgrp.createDimension("num_nodes", len(mesh.points))
        rootgrp.createDimension("num_dim", mesh.points.shape[1])
        rootgrp.createDimension("num_elem", total_num_elems)
        rootgrp.createDimension("num_el_blk", len(mesh.cells))
        rootgrp.createDimension("num_node_sets", len(mesh.point_sets))
        rootgrp.createDimension("len_string", 33)
        rootgrp.createDimension("len_line", 81)
        rootgrp.createDimension("four", 4)
        rootgrp.createDimension("time_step", None)
        # dummy time step
        data = rootgrp.createVariable("time_whole", "f4", ("time_step",))
        data[:] = 0.0
        # points
        coor_names = rootgrp.createVariable(
            "coor_names", "S1", ("num_dim", "len_string")
        )
        coor_names.set_auto_mask(False)
        coor_names[0, 0] = b"X"
        coor_names[1, 0] = b"Y"
        if mesh.points.shape[1] == 3:
            coor_names[2, 0] = b"Z"
        data = rootgrp.createVariable(
            "coord",
            numpy_to_exodus_dtype[mesh.points.dtype.name],
            ("num_dim", "num_nodes"),
        )
        data[:] = mesh.points.T
        # cells
        # ParaView needs eb_prop1 -- some ID. The values don't seem to matter as
        # long as they are different for the for different blocks.
        data = rootgrp.createVariable("eb_prop1", "i4", "num_el_blk")
        for k in range(len(mesh.cells)):
            data[k] = k
        for k, cell_block in enumerate(mesh.cells):
            dim1 = f"num_el_in_blk{k + 1}"
            dim2 = f"num_nod_per_el{k + 1}"
            rootgrp.createDimension(dim1, cell_block.data.shape[0])
            rootgrp.createDimension(dim2, cell_block.data.shape[1])
            dtype = numpy_to_exodus_dtype[cell_block.data.dtype.name]
            data = rootgrp.createVariable(f"connect{k + 1}", dtype, (dim1, dim2))
            data.elem_type = meshio_to_exodus_type[cell_block.type]
            # Exodus is 1-based
            data[:] = cell_block.data + 1
        # point data
        # The variable `name_nod_var` holds the names and indices of the node variables, the
        # variables `vals_nod_var{1,2,...}` hold the actual data.
        num_nod_var = len(mesh.point_data)
        if num_nod_var > 0:
            rootgrp.createDimension("num_nod_var", num_nod_var)
            # set names (written character by character into the S1 array)
            point_data_names = rootgrp.createVariable(
                "name_nod_var", "S1", ("num_nod_var", "len_string")
            )
            point_data_names.set_auto_mask(False)
            for k, name in enumerate(mesh.point_data.keys()):
                for i, letter in enumerate(name):
                    point_data_names[k, i] = letter.encode()
            # Set data. ParaView might have some problems here, see the
            # meshio issue tracker.
            for k, (name, data) in enumerate(mesh.point_data.items()):
                for i, s in enumerate(data.shape):
                    rootgrp.createDimension(f"dim_nod_var{k}{i}", s)
                dims = ["time_step"] + [
                    f"dim_nod_var{k}{i}" for i in range(len(data.shape))
                ]
                node_data = rootgrp.createVariable(
                    f"vals_nod_var{k + 1}",
                    numpy_to_exodus_dtype[data.dtype.name],
                    tuple(dims),
                    fill_value=False,
                )
                node_data[0] = data
        # node sets
        num_point_sets = len(mesh.point_sets)
        if num_point_sets > 0:
            data = rootgrp.createVariable("ns_prop1", "i4", "num_node_sets")
            data_names = rootgrp.createVariable(
                "ns_names", "S1", ("num_node_sets", "len_string")
            )
            for k, name in enumerate(mesh.point_sets.keys()):
                data[k] = k
                for i, letter in enumerate(name):
                    data_names[k, i] = letter.encode()
            for k, (key, values) in enumerate(mesh.point_sets.items()):
                dim1 = f"num_nod_ns{k + 1}"
                rootgrp.createDimension(dim1, values.shape[0])
                dtype = numpy_to_exodus_dtype[values.dtype.name]
                data = rootgrp.createVariable(f"node_ns{k + 1}", dtype, (dim1,))
                # Exodus is 1-based
                data[:] = values + 1
src/meshio/flac3d/ 0000775 0000000 0000000 00000000000 14562440725 0014276 5 ustar 00root root 0000000 0000000 src/meshio/flac3d/__init__.py 0000664 0000000 0000000 00000000076 14562440725 0016412 0 ustar 00root root 0000000 0000000 from ._flac3d import read, write
__all__ = ["read", "write"]
src/meshio/flac3d/_flac3d.py 0000664 0000000 0000000 00000044346 14562440725 0016156 0 ustar 00root root 0000000 0000000 """
I/O for FLAC3D format.
"""
from __future__ import annotations
import re
import struct
import time
import numpy as np
from ..__about__ import __version__ as version
from .._common import warn
from .._exceptions import ReadError
from .._files import open_file
from .._helpers import register_format
from .._mesh import Mesh
# Cell types FLAC3D can represent, keyed by FLAC3D's two element families:
# "zone" (3D cells) and "face" (2D cells). Higher-order meshio elements are
# degraded to their linear counterpart.
meshio_only = {
    "zone": {
        "tetra": "tetra",
        "tetra10": "tetra",
        "pyramid": "pyramid",
        "pyramid13": "pyramid",
        "wedge": "wedge",
        "wedge12": "wedge",
        "wedge15": "wedge",
        "wedge18": "wedge",
        "hexahedron": "hexahedron",
        "hexahedron20": "hexahedron",
        "hexahedron24": "hexahedron",
        "hexahedron27": "hexahedron",
    },
    "face": {
        "triangle": "triangle",
        "triangle6": "triangle",
        "triangle7": "triangle",
        "quad": "quad",
        "quad8": "quad",
        "quad9": "quad",
    },
}
# Infer the meshio cell type from a record's node count, per element family.
numnodes_to_meshio_type = {
    "zone": {4: "tetra", 5: "pyramid", 6: "wedge", 8: "hexahedron"},
    "face": {3: "triangle", 4: "quad"},
}
# FLAC3D element keywords written into f3grid files.
meshio_to_flac3d_type = {
    "triangle": "T3",
    "quad": "Q4",
    "tetra": "T4",
    "pyramid": "P5",
    "wedge": "W6",
    "hexahedron": "B8",
}
# Node permutations between FLAC3D and meshio ordering (reading direction).
flac3d_to_meshio_order = {
    "triangle": [0, 1, 2],
    "quad": [0, 1, 2, 3],
    "tetra": [0, 1, 2, 3],
    "pyramid": [0, 1, 4, 2, 3],
    "wedge": [0, 1, 3, 2, 4, 5],
    "hexahedron": [0, 1, 4, 2, 3, 6, 7, 5],
}
# Node permutations for writing (meshio -> FLAC3D).
meshio_to_flac3d_order = {
    "triangle": [0, 1, 2],
    "quad": [0, 1, 2, 3],
    "tetra": [0, 1, 2, 3],
    "pyramid": [0, 1, 3, 4, 2],
    "wedge": [0, 1, 3, 2, 4, 5],
    "hexahedron": [0, 1, 3, 4, 2, 7, 5, 6],
}
# Alternative write ordering for cells with negative orientation.
meshio_to_flac3d_order_2 = {
    "tetra": [0, 2, 1, 3],
    "pyramid": [0, 3, 1, 4, 2],
    "wedge": [0, 2, 3, 1, 5, 4],
    "hexahedron": [0, 3, 1, 4, 2, 5, 7, 6],
}
# Topological dimension of each element family.
flag_to_numdim = {
    "zone": 3,
    "face": 2,
}
def _merge(a: dict, b: dict) -> dict:
return {**a, **b}
def read(filename):
    """Read FLAC3D f3grid grid file."""
    # Sniff a few leading bytes: NUL bytes indicate the binary flavor.
    with open_file(filename, "rb") as probe:
        binary = b"\x00" in probe.read(8)
    with open_file(filename, "rb" if binary else "r") as handle:
        return read_buffer(handle, binary)
def read_buffer(f, binary):
    """Read binary or ASCII file."""
    # NOTE(review): part of this function appears to have been lost during
    # extraction -- the actual point/cell parsing loop between the header
    # reads and the id-remapping below is missing, struct format strings are
    # truncated, and `z_offset`, `cell_blocks`, `cell_ids` are referenced
    # without a visible definition. Restore from version control before
    # editing; the code is kept verbatim here.
    points = []
    point_ids = {}
    f_cells = []
    z_cells = []
    f_cell_sets = {}
    z_cell_sets = {}
    f_cell_ids = []
    z_cell_ids = []
    pidx = 0
    if binary:
        # Not sure what the first bytes represent, the format might be wrong
        # It does not seem to be useful anyway
        _ = struct.unpack("<2I", f.read(8))
        (num_nodes,) = struct.unpack(" 0:
        f_inv = np.full(np.max(f_cell_ids) + 1, -1)
        f_inv[f_cell_ids] = np.arange(len(f_cell_ids))
        f_cell_sets = {key: f_inv[value] for key, value in f_cell_sets.items()}
    if len(z_cell_ids) > 0:
        z_inv = np.full(np.max(z_cell_ids) + 1, -1)
        z_inv[z_cell_ids] = np.arange(len(z_cell_ids))
        z_cell_sets = {
            key: z_inv[value] + z_offset for key, value in z_cell_sets.items()
        }
    cell_sets = _merge(f_cell_sets, z_cell_sets)
    # cell_sets contains the indices into the global cell list. Since this is
    # split up into blocks, we need to split the cell_sets, too.
    bins = np.cumsum([len(cb[1]) for cb in cell_blocks])
    for key, data in cell_sets.items():
        d = np.digitize(data, bins)
        cell_sets[key] = [data[d == k] for k in range(len(cell_blocks))]
    # assert len(cell_ids) == sum(len(block) for _, block in cell_blocks)
    # also store the cell_ids
    cell_data = {}
    if len(cell_blocks) > 0:
        cell_data = {
            "cell_ids": np.split(
                cell_ids, np.cumsum([len(block) for _, block in cell_blocks][:-1])
            )
        }
    return Mesh(
        points=np.array(points),
        cells=cell_blocks,
        cell_data=cell_data,
        cell_sets=cell_sets,
    )
def _read_point_ascii(buf_or_line):
"""Read point coordinates."""
pid = int(buf_or_line[1])
point = [float(l) for l in buf_or_line[2:]]
return pid, point
def _read_point_binary(buf_or_line):
    """Read point coordinates."""
    # NOTE(review): this region is corrupted in extraction -- the struct
    # format string (presumably "<Iddd" or similar) was stripped, and the
    # cell-accumulation logic below belongs to a different (lost) function
    # whose header is missing. Restore from version control before editing;
    # the code is kept verbatim here.
    pid, x, y, z = struct.unpack(" 0 and cell_type == cells[-1][0]:
        cells[-1][1].append(cell)
    else:
        cells.append((cell_type, [cell]))
def split_f_z(mesh):
    """Split a meshio mesh into FLAC3D zones (3D cells) and faces (2D cells).

    Returns ``(zcells, fcells, zsets, fsets)``: the 3D and 2D cell blocks,
    and the corresponding cell sets converted to global, 1-based indices.
    Cell blocks that are neither zone nor face types are dropped.
    """
    # FLAC3D makes a difference between ZONES (3D-cells only) and FACES
    # (2D-cells only). Split cells into zcells and fcells, along with the cell
    # sets etc.
    zcells = []
    fcells = []
    for cell_block in mesh.cells:
        if cell_block.type in meshio_only["zone"]:
            zcells.append(cell_block)
        elif cell_block.type in meshio_only["face"]:
            fcells.append(cell_block)
    # Per set, keep one entry per cell block of the *whole* mesh; entries for
    # blocks of the other kind are None placeholders.
    zsets = {}
    fsets = {}
    for key, cset in mesh.cell_sets.items():
        zsets[key] = []
        fsets[key] = []
        for cell_block, sblock in zip(mesh.cells, cset):
            zsets[key].append(
                sblock if cell_block.type in meshio_only["zone"] else None
            )
            fsets[key].append(
                sblock if cell_block.type in meshio_only["face"] else None
            )
    # remove the data that is only None
    zsets = {
        key: value
        for key, value in zsets.items()
        if not all(item is None for item in value)
    }
    fsets = {
        key: value
        for key, value in fsets.items()
        if not all(item is None for item in value)
    }
    # Right now, the zsets contain indices into the corresponding cell block.
    # FLAC3D expects _global_ indices. Update.
    # NOTE(review): `block += gid` mutates the set arrays in place, and the zip
    # pairs sizes of the zone blocks only with entries of `data` that still
    # include None placeholders for face blocks — looks like this assumes
    # meshes are not mixed zone/face within one set; TODO confirm.
    cell_block_sizes = [len(cb) for cb in zcells]
    for key, data in zsets.items():
        gid = 0
        for n, block in zip(cell_block_sizes, data):
            block += gid
            gid += n
    # TODO not sure if fcells and zcells share a common global index
    cell_block_sizes = [len(cb) for cb in fcells]
    for key, data in fsets.items():
        gid = 0
        for n, block in zip(cell_block_sizes, data):
            block += gid
            gid += n
    for label, values in zsets.items():
        zsets[label] = np.concatenate(values)
    for label, values in fsets.items():
        fsets[label] = np.concatenate(values)
    # flac3d indices start at 1
    for label, values in zsets.items():
        zsets[label] += 1
    for label, values in fsets.items():
        fsets[label] += 1
    return zcells, fcells, zsets, fsets
def write(filename, mesh: Mesh, float_fmt: str = ".16e", binary: bool = False):
    """Write FLAC3D f3grid grid file.

    Parameters
    ----------
    filename : path-like or file-like, passed on to ``open_file``
    mesh : Mesh
    float_fmt : str, format spec for point coordinates in ASCII mode
    binary : bool, write the binary f3grid variant if True
    """
    # FLAC3D represents 3D cells as ZONEs and 2D cells as FACEs; anything
    # else (lines, vertices, ...) cannot be written and is skipped.
    skip = [
        c.type
        for c in mesh.cells
        if c.type not in meshio_only["zone"] and c.type not in meshio_only["face"]
    ]
    if skip:
        warn(f'FLAC3D format only supports zone (3D) and face (2D) cells. Skipping {", ".join(skip)}.')
    # split into face/zone data
    zcells, fcells, zsets, fsets = split_f_z(mesh)
    mode = "wb" if binary else "w"
    with open_file(filename, mode) as f:
        if binary:
            # Header values of unknown meaning; mirrored from files produced
            # by FLAC3D itself.
            f.write(struct.pack("<2I", 1375135718, 3))
        else:
            f.write(f"* FLAC3D grid produced by meshio v{version}\n")
            f.write(f"* {time.ctime()}\n")
        _write_points(f, mesh.points, binary, float_fmt)
        # Make gid an array such that its value can be persistently altered
        # inside the functions.
        gid = np.array(0)
        # Zones first, then faces. Pass only the matching cell blocks
        # (zcells/fcells), not mesh.cells: _translate_zcells asserts zone
        # types, and the sets were built against the split block lists.
        cells = _translate_zcells(mesh.points, zcells)
        _write_cells(f, cells, "zone", binary, gid)
        _write_groups(f, zcells, zsets, "zone", binary)
        #
        cells = _translate_fcells(fcells)
        _write_cells(f, cells, "face", binary, gid)
        _write_groups(f, fcells, fsets, "face", binary)
def _write_points(f, points, binary, float_fmt=None):
"""Write points coordinates."""
if binary:
f.write(struct.pack(" None:
"""Write groups."""
if materials is None:
if binary:
f.write(struct.pack("
def slicing_summing(a, b, c):
c0 = b[:, 1] * c[:, 2] - b[:, 2] * c[:, 1]
c1 = b[:, 2] * c[:, 0] - b[:, 0] * c[:, 2]
c2 = b[:, 0] * c[:, 1] - b[:, 1] * c[:, 0]
return a[:, 0] * c0 + a[:, 1] * c1 + a[:, 2] * c2
zones = []
for cell_block in cells:
assert cell_block.type in meshio_only["zone"]
# Compute scalar triple products
key = meshio_only["zone"][cell_block.type]
tmp = points[cell_block.data[:, meshio_to_flac3d_order[key][:4]].T]
det = slicing_summing(tmp[1] - tmp[0], tmp[2] - tmp[0], tmp[3] - tmp[0])
# Reorder corner points
data = np.where(
(det > 0)[:, None],
cell_block.data[:, meshio_to_flac3d_order[key]],
cell_block.data[:, meshio_to_flac3d_order_2[key]],
)
zones.append((key, data))
return zones
def _translate_fcells(cells):
    """Reorder meshio cells to FLAC3D faces.

    Returns a list of ``(flac3d_face_type, connectivity)`` tuples with the
    node columns permuted into FLAC3D's face node order.
    """
    faces = []
    for cell_block in cells:
        # Use attribute access consistently; the previous tuple-unpacking of
        # cell_block shadowed a `data` value that was never used.
        ctype = cell_block.type
        assert ctype in meshio_only["face"]
        key = meshio_only["face"][ctype]
        data = cell_block.data[:, meshio_to_flac3d_order[key]]
        faces.append((key, data))
    return faces
def _translate_groups(cells, cell_data, field_data, flag):
    """Convert meshio cell_data to FLAC3D groups.

    Returns a dict mapping group label -> 1-based global indices of the
    cells that carry that cell_data value and have the dimension selected
    by `flag` ("zone" -> 3D, "face" -> 2D via flag_to_numdim).
    """
    # Per-cell dimension: 2 for face types, 3 for everything else.
    dim = np.concatenate(
        [np.full(len(c.data), 2 if c.type in meshio_only["face"] else 3) for c in cells]
    )
    numdim = flag_to_numdim[flag]
    groups = {
        k: np.nonzero(np.logical_and(cell_data == k, dim == numdim))[0] + 1
        for k in np.unique(cell_data)
    }
    # drop empty groups
    groups = {k: v for k, v in groups.items() if v.size}
    # Default label is the stringified data value; 0 means "no group".
    labels = {k: str(k) for k in groups.keys()}
    labels[0] = "None"
    if field_data:
        labels.update(
            {v[0]: k for k, v in field_data.items() if v[1] == flag_to_numdim[flag]}
        )
    # NOTE(review): pairing labels with groups via zip relies on both dicts
    # sharing insertion order over the group keys; extra label entries (the
    # 0 -> "None" default, unmatched field_data names) are silently dropped
    # by zip — TODO confirm this alignment is intended.
    return dict(zip(labels.values(), groups.values()))
def _write_table(f, data, ncol: int = 20):
"""Write group data table."""
nrow = len(data) // ncol
lines = np.split(data, np.full(nrow, ncol).cumsum())
for line in lines:
if len(line):
f.write(" {}\n".format(" ".join([str(l) for l in line])))
# Register the FLAC3D reader/writer under the "flac3d" format name and the
# ".f3grid" file extension in meshio's central format registry.
register_format("flac3d", [".f3grid"], read, {"flac3d": write})
src/meshio/gmsh/ 0000775 0000000 0000000 00000000000 14562440725 0014100 5 ustar 00root root 0000000 0000000 src/meshio/gmsh/__init__.py 0000664 0000000 0000000 00000000351 14562440725 0016210 0 ustar 00root root 0000000 0000000 from .common import _gmsh_to_meshio_type as gmsh_to_meshio_type
from .common import _meshio_to_gmsh_type as meshio_to_gmsh_type
from .main import read, write
__all__ = ["read", "write", "gmsh_to_meshio_type", "meshio_to_gmsh_type"]
src/meshio/gmsh/_gmsh22.py 0000664 0000000 0000000 00000034534 14562440725 0015724 0 ustar 00root root 0000000 0000000 """
I/O for Gmsh's msh format, cf.
.
"""
from __future__ import annotations
import numpy as np
from .._common import cell_data_from_raw, num_nodes_per_cell, raw_from_cell_data, warn
from .._exceptions import ReadError
from .._mesh import CellBlock, Mesh
from .common import (
_fast_forward_over_blank_lines,
_fast_forward_to_end_block,
_gmsh_to_meshio_order,
_gmsh_to_meshio_type,
_meshio_to_gmsh_order,
_meshio_to_gmsh_type,
_read_data,
_read_physical_names,
_write_data,
_write_physical_names,
)
c_int = np.dtype("i")
c_double = np.dtype("d")
def read_buffer(f, is_ascii, data_size):
    """Read a Gmsh MSH 2.2 mesh from an open (binary-mode) file object.

    Dispatches on the ``$Section`` markers; unknown sections are skipped.
    Returns a Mesh with the element tags merged into ``cell_data``.
    """
    # The format is specified at
    # .
    # Initialize the optional data fields
    points = []
    cells = []
    field_data = {}
    cell_data_raw = {}
    cell_tags = {}
    point_data = {}
    periodic = None
    point_tags = None
    has_additional_tag_data = False
    while True:
        # fast-forward over blank lines
        line, is_eof = _fast_forward_over_blank_lines(f)
        if is_eof:
            break
        if line[0] != "$":
            raise ReadError(f"Unexpected line {repr(line)}")
        environ = line[1:].strip()
        if environ == "PhysicalNames":
            _read_physical_names(f, field_data)
        elif environ == "Nodes":
            points, point_tags = _read_nodes(f, is_ascii)
        elif environ == "Elements":
            has_additional_tag_data, cell_tags = _read_cells(
                f, cells, point_tags, is_ascii
            )
        elif environ == "Periodic":
            periodic = _read_periodic(f)
        elif environ == "NodeData":
            _read_data(f, "NodeData", point_data, data_size, is_ascii)
        elif environ == "ElementData":
            _read_data(f, "ElementData", cell_data_raw, data_size, is_ascii)
        else:
            # unrecognized section: skip it
            _fast_forward_to_end_block(f, environ)
    if has_additional_tag_data:
        warn("The file contains tag data that couldn't be processed.")
    cell_data = cell_data_from_raw(cells, cell_data_raw)
    # merge cell_tags into cell_data
    for tag_name, tag_dict in cell_tags.items():
        if tag_name not in cell_data:
            cell_data[tag_name] = []
        # Tags are stored per cell *type*; walk the cell blocks and slice the
        # matching range out of each type's tag array.
        offset = {}
        for cell_type, cell_array in cells:
            start = offset.setdefault(cell_type, 0)
            end = start + len(cell_array)
            offset[cell_type] = end
            tags = tag_dict.get(cell_type, [])
            tags = np.array(tags[start:end], dtype=c_int)
            cell_data[tag_name].append(tags)
    return Mesh(
        points,
        cells,
        point_data=point_data,
        cell_data=cell_data,
        field_data=field_data,
        gmsh_periodic=periodic,
    )
def _read_nodes(f, is_ascii):
    """Read the msh2 $Nodes section; returns (points, point_tags)."""
    # The first line is the number of nodes
    line = f.readline().decode()
    num_nodes = int(line)
    if is_ascii:
        # Each row is: tag x y z
        points = np.fromfile(f, count=num_nodes * 4, sep=" ").reshape((num_nodes, 4))
        # The first number is the index
        point_tags = points[:, 0]
        points = points[:, 1:]
    else:
        # binary: int32 tag followed by three float64 coordinates per node
        dtype = [("index", c_int), ("x", c_double, (3,))]
        data = np.fromfile(f, count=num_nodes, dtype=dtype)
        if not (data["index"] == range(1, num_nodes + 1)).all():
            raise ReadError()
        points = np.ascontiguousarray(data["x"])
        point_tags = data["index"]
    _fast_forward_to_end_block(f, "Nodes")
    return points, point_tags
def _read_cells(f, cells, point_tags, is_ascii):
    """Read the msh2 $Elements section into `cells` (modified in place).

    Returns ``(has_additional_tag_data, output_cell_tags)`` where the tags
    are restricted to the standard gmsh:physical/gmsh:geometrical pair.
    """
    # The first line is the number of elements
    line = f.readline().decode()
    total_num_cells = int(line)
    has_additional_tag_data = False
    cell_tags = {}
    if is_ascii:
        _read_cells_ascii(f, cells, cell_tags, total_num_cells)
    else:
        _read_cells_binary(f, cells, cell_tags, total_num_cells)
    # override cells in-place
    cells[:] = [(key, _gmsh_to_meshio_order(key, values)) for key, values in cells]
    # Remap node tags (possibly sparse, 1-based) to contiguous 0-based indices.
    point_tags = np.asarray(point_tags, dtype=np.int32) - 1
    remap = -np.ones((np.max(point_tags) + 1,), dtype=np.int32)
    remap[point_tags] = np.arange(point_tags.shape[0])
    for ic, (ct, cd) in enumerate(cells):
        cells[ic] = (ct, remap[cd])
    _fast_forward_to_end_block(f, "Elements")
    # restrict to the standard two data items (physical, geometrical)
    output_cell_tags = {}
    for cell_type in cell_tags:
        physical = []
        geometrical = []
        for item in cell_tags[cell_type]:
            if len(item) > 0:
                physical.append(item[0])
            if len(item) > 1:
                geometrical.append(item[1])
            if len(item) > 2:
                has_additional_tag_data = True
        physical = np.array(physical, dtype=c_int)
        geometrical = np.array(geometrical, dtype=c_int)
        if len(physical) > 0:
            if "gmsh:physical" not in output_cell_tags:
                output_cell_tags["gmsh:physical"] = {}
            output_cell_tags["gmsh:physical"][cell_type] = physical
        if len(geometrical) > 0:
            if "gmsh:geometrical" not in output_cell_tags:
                output_cell_tags["gmsh:geometrical"] = {}
            output_cell_tags["gmsh:geometrical"][cell_type] = geometrical
    return has_additional_tag_data, output_cell_tags
def _read_cells_ascii(f, cells, cell_tags, total_num_cells: int) -> None:
    """Read `total_num_cells` ASCII element lines into `cells` and
    `cell_tags` (both modified in place); connectivity is made 0-based."""
    for _ in range(total_num_cells):
        line = f.readline().decode()
        # row layout: elm-number elm-type number-of-tags <tags> node-numbers
        data = [int(k) for k in filter(None, line.split())]
        t = _gmsh_to_meshio_type[data[1]]
        num_nodes_per_elem = num_nodes_per_cell[t]
        # start a new block whenever the element type changes
        if len(cells) == 0 or t != cells[-1][0]:
            cells.append((t, []))
        cells[-1][1].append(data[-num_nodes_per_elem:])
        # data[2] gives the number of tags. The gmsh manual
        # 
        # says:
        # >>>
        # By default, the first tag is the number of the physical entity to which the
        # element belongs; the second is the number of the elementary geometrical entity
        # to which the element belongs; the third is the number of mesh partitions to
        # which the element belongs, followed by the partition ids (negative partition
        # ids indicate ghost cells). A zero tag is equivalent to no tag. Gmsh and most
        # codes using the MSH 2 format require at least the first two tags (physical and
        # elementary tags).
        # <<<
        num_tags = data[2]
        if t not in cell_tags:
            cell_tags[t] = []
        cell_tags[t].append(data[3 : 3 + num_tags])
    # convert to numpy arrays
    # Subtract one to account for the fact that python indices are 0-based.
    for k, c in enumerate(cells):
        cells[k] = (c[0], np.array(c[1], dtype=c_int) - 1)
    # Cannot convert cell_tags[key] to numpy array: There may be a different number of
    # tags for each cell.
def _read_cells_binary(f, cells, cell_tags, total_num_cells):
    """Read binary msh2 element records into `cells` and `cell_tags`
    (both modified in place); connectivity is made 0-based."""
    num_elems = 0
    while num_elems < total_num_cells:
        # read element header: elm-type num-of-elm-follow number-of-tags
        elem_type, num_elems0, num_tags = np.fromfile(f, count=3, dtype=c_int)
        t = _gmsh_to_meshio_type[elem_type]
        num_nodes_per_elem = num_nodes_per_cell[t]
        # read element data: one row per element (tag, tags..., nodes...)
        shape = (num_elems0, 1 + num_tags + num_nodes_per_elem)
        count = shape[0] * shape[1]
        data = np.fromfile(f, count=count, dtype=c_int).reshape(shape)
        if len(cells) == 0 or t != cells[-1][0]:
            cells.append((t, []))
        cells[-1][1].append(data[:, -num_nodes_per_elem:])
        if t not in cell_tags:
            cell_tags[t] = []
        cell_tags[t].append(data[:, 1 : num_tags + 1])
        num_elems += num_elems0
    # collect cells
    for k, c in enumerate(cells):
        cells[k] = (c[0], np.vstack(c[1]) - 1)
    # collect cell tags
    for key in cell_tags:
        cell_tags[key] = np.vstack(cell_tags[key])
def _read_periodic(f):
    """Read the msh2 $Periodic section.

    Returns a list of ``[edim, (slave_tag, master_tag), affine, slave_master]``
    entries with 0-based node pairs; `affine` is None if no transform given.
    """
    periodic = []
    num_periodic = int(f.readline().decode())
    for _ in range(num_periodic):
        line = f.readline().decode()
        edim, stag, mtag = (int(s) for s in line.split())
        line = f.readline().decode().strip()
        if line.startswith("Affine"):
            # optional affine transform followed by the node count
            affine = line.replace("Affine", "", 1)
            affine = np.fromstring(affine, float, sep=" ")
            num_nodes = int(f.readline().decode())
        else:
            affine = None
            num_nodes = int(line)
        slave_master = []
        for _ in range(num_nodes):
            line = f.readline().decode()
            snode, mnode = (int(s) for s in line.split())
            slave_master.append([snode, mnode])
        slave_master = np.array(slave_master, dtype=c_int).reshape(-1, 2)
        slave_master -= 1  # Subtract one, Python is 0-based
        periodic.append([edim, (stag, mtag), affine, slave_master])
    _fast_forward_to_end_block(f, "Periodic")
    return periodic
def write(filename, mesh, float_fmt=".16e", binary=True):
    """Write a Gmsh MSH 2.2 file.

    Sections are emitted in the order required by the format:
    $MeshFormat, $PhysicalNames, $Nodes, $Elements, $Periodic, then data.
    """
    # Filter the point data: gmsh:dim_tags are tags, the rest is actual point data.
    point_data = {}
    for key, d in mesh.point_data.items():
        if key not in ["gmsh:dim_tags"]:
            point_data[key] = d
    # Split the cell data: gmsh:physical and gmsh:geometrical are tags, the rest is
    # actual cell data.
    tag_data = {}
    cell_data = {}
    for key, d in mesh.cell_data.items():
        if key in ["gmsh:physical", "gmsh:geometrical", "cell_tags"]:
            tag_data[key] = d
        else:
            cell_data[key] = d
    # Always include the physical and geometrical tags. See also the quoted excerpt from
    # the gmsh documentation in the _read_cells_ascii function above.
    for tag in ["gmsh:physical", "gmsh:geometrical"]:
        if tag not in tag_data:
            warn(f"Appending zeros to replace the missing {tag[5:]} tag data.")
            tag_data[tag] = [
                np.zeros(len(cell_block), dtype=c_int) for cell_block in mesh.cells
            ]
    with open(filename, "wb") as fh:
        # header: version, file type (0 ascii / 1 binary), data size
        mode_idx = 1 if binary else 0
        size_of_double = 8
        fh.write(f"$MeshFormat\n2.2 {mode_idx} {size_of_double}\n".encode())
        if binary:
            # endianness-check integer required by the binary format
            np.array([1], dtype=c_int).tofile(fh)
            fh.write(b"\n")
        fh.write(b"$EndMeshFormat\n")
        if mesh.field_data:
            _write_physical_names(fh, mesh.field_data)
        _write_nodes(fh, mesh.points, float_fmt, binary)
        _write_elements(fh, mesh.cells, tag_data, binary)
        if mesh.gmsh_periodic is not None:
            _write_periodic(fh, mesh.gmsh_periodic, float_fmt)
        for name, dat in point_data.items():
            _write_data(fh, "NodeData", name, dat, binary)
        cell_data_raw = raw_from_cell_data(cell_data)
        for name, dat in cell_data_raw.items():
            _write_data(fh, "ElementData", name, dat, binary)
def _write_nodes(fh, points, float_fmt, binary):
if points.shape[1] == 2:
# msh2 requires 3D points, but 2D points given. Appending 0 third component.
points = np.column_stack([points, np.zeros_like(points[:, 0])])
fh.write(b"$Nodes\n")
fh.write(f"{len(points)}\n".encode())
if binary:
dtype = [("index", c_int), ("x", c_double, (3,))]
tmp = np.empty(len(points), dtype=dtype)
tmp["index"] = 1 + np.arange(len(points))
tmp["x"] = points
tmp.tofile(fh)
fh.write(b"\n")
else:
fmt = "{} " + " ".join(3 * ["{:" + float_fmt + "}"]) + "\n"
for k, x in enumerate(points):
fh.write(fmt.format(k + 1, x[0], x[1], x[2]).encode())
fh.write(b"$EndNodes\n")
def _write_elements(fh, cells: list[CellBlock], tag_data, binary: bool):
    """Write the msh2 $Elements section with per-element tag columns taken
    from `tag_data` (gmsh:physical, gmsh:geometrical, cell_tags)."""
    # write elements
    fh.write(b"$Elements\n")
    # count all cells
    total_num_cells = sum(len(cell_block) for cell_block in cells)
    fh.write(f"{total_num_cells}\n".encode())
    consecutive_index = 0
    for k, cell_block in enumerate(cells):
        cell_type = cell_block.type
        node_idcs = _meshio_to_gmsh_order(cell_type, cell_block.data)
        # collect the tag columns available for this block
        tags = []
        for name in ["gmsh:physical", "gmsh:geometrical", "cell_tags"]:
            if name in tag_data:
                tags.append(tag_data[name][k])
        fcd = np.concatenate([tags]).astype(c_int).T
        if len(fcd) == 0:
            fcd = np.empty((len(node_idcs), 0), dtype=c_int)
        if binary:
            # header: elm-type, number of elements, number of tags
            header = [_meshio_to_gmsh_type[cell_type], node_idcs.shape[0], fcd.shape[1]]
            np.array(header, dtype=c_int).tofile(fh)
            # actual data: 1-based element index, tags, 1-based connectivity
            a = np.arange(len(node_idcs), dtype=c_int)[:, np.newaxis]
            a += 1 + consecutive_index
            array = np.hstack([a, fcd, node_idcs + 1])
            if array.dtype != c_int:
                array = array.astype(c_int)
            array.tofile(fh)
        else:
            form = (
                "{} "
                + str(_meshio_to_gmsh_type[cell_type])
                + " "
                + str(fcd.shape[1])
                + " {} {}\n"
            )
            for i, c in enumerate(node_idcs):
                fh.write(
                    form.format(
                        consecutive_index + i + 1,
                        " ".join([str(val) for val in fcd[i]]),
                        # a bit clumsy for `c+1`, but if c is uint64, c+1 is float64
                        " ".join([str(cc) for cc in c + np.array(1, dtype=c.dtype)]),
                    ).encode()
                )
        consecutive_index += len(node_idcs)
    if binary:
        fh.write(b"\n")
    fh.write(b"$EndElements\n")
def _write_periodic(fh, periodic, float_fmt):
    """Write the msh2 $Periodic section (ASCII only); node pairs are
    converted back to Gmsh's 1-based numbering."""
    fh.write(b"$Periodic\n")
    fh.write(f"{len(periodic)}\n".encode())
    for dim, (stag, mtag), affine, slave_master in periodic:
        fh.write(f"{dim} {stag} {mtag}\n".encode())
        if affine is not None:
            fh.write(b"Affine ")
            affine = np.array(affine, dtype=float)
            affine = np.atleast_2d(affine.ravel())
            np.savetxt(fh, affine, fmt="%" + float_fmt)
        slave_master = np.array(slave_master, dtype=c_int).reshape(-1, 2)
        slave_master = slave_master + 1  # Add one, Gmsh is 1-based
        fh.write(f"{len(slave_master)}\n".encode())
        for snode, mnode in slave_master:
            fh.write(f"{snode} {mnode}\n".encode())
    fh.write(b"$EndPeriodic\n")
src/meshio/gmsh/_gmsh40.py 0000664 0000000 0000000 00000034504 14562440725 0015721 0 ustar 00root root 0000000 0000000 """
I/O for Gmsh's msh format (version 4.0, as used by Gmsh 4.1.5), cf.
.
"""
from __future__ import annotations
from functools import partial
import numpy as np
from .._common import cell_data_from_raw, num_nodes_per_cell, raw_from_cell_data
from .._exceptions import ReadError
from .._mesh import CellBlock, Mesh
from .common import (
_fast_forward_to_end_block,
_gmsh_to_meshio_order,
_gmsh_to_meshio_type,
_meshio_to_gmsh_order,
_meshio_to_gmsh_type,
_read_data,
_read_physical_names,
_write_data,
_write_physical_names,
)
c_int = np.dtype("i")
c_long = np.dtype("l")
c_ulong = np.dtype("L")
c_double = np.dtype("d")
def read_buffer(f, is_ascii: bool, data_size) -> Mesh:
    """Read a Gmsh MSH 4.0 mesh from an open (binary-mode) file object.

    Dispatches on the ``$Section`` markers; unrecognized sections are
    skipped. Raises ReadError if no $Elements section is present.
    """
    # Initialize the optional data fields
    points = []
    # `cells` stays None until an $Elements section is read, so a missing
    # section can be reported as a ReadError (matching the 4.1 reader)
    # instead of an obscure NameError below.
    cells = None
    field_data = {}
    cell_data_raw = {}
    cell_tags = {}
    point_data = {}
    physical_tags = None
    periodic = None
    while True:
        line = f.readline().decode()
        if not line:
            # EOF
            break
        if line[0] != "$":
            raise ReadError(f"Unexpected line {repr(line)}")
        environ = line[1:].strip()
        if environ == "PhysicalNames":
            _read_physical_names(f, field_data)
        elif environ == "Entities":
            physical_tags = _read_entities(f, is_ascii)
        elif environ == "Nodes":
            points, point_tags = _read_nodes(f, is_ascii)
        elif environ == "Elements":
            cells, cell_tags = _read_elements(f, point_tags, physical_tags, is_ascii)
        elif environ == "Periodic":
            periodic = _read_periodic(f, is_ascii)
        elif environ == "NodeData":
            _read_data(f, "NodeData", point_data, data_size, is_ascii)
        elif environ == "ElementData":
            _read_data(f, "ElementData", cell_data_raw, data_size, is_ascii)
        else:
            # Per the Gmsh reference, any section with an unrecognized header
            # is simply ignored (e.g. $Comments/$EndComments).
            # skip environment
            _fast_forward_to_end_block(f, environ)
    if cells is None:
        raise ReadError("$Element section not found.")
    cell_data = cell_data_from_raw(cells, cell_data_raw)
    cell_data.update(cell_tags)
    return Mesh(
        points,
        cells,
        point_data=point_data,
        cell_data=cell_data,
        field_data=field_data,
        gmsh_periodic=periodic,
    )
def _read_entities(f, is_ascii: bool):
    """Read the msh4.0 $Entities section.

    Returns a 4-tuple of dicts (one per dimension 0..3) mapping entity tag
    -> list of physical tags. Bounding boxes and BREP tags are discarded.
    """
    physical_tags = tuple({} for _ in range(4))  # dims 0, 1, 2, 3
    fromfile = partial(np.fromfile, sep=" " if is_ascii else "")
    number = fromfile(f, c_ulong, 4)  # dims 0, 1, 2, 3
    for d, n in enumerate(number):
        for _ in range(n):
            tag = int(fromfile(f, c_int, 1)[0])
            fromfile(f, c_double, 6)  # discard boxMinX...boxMaxZ
            num_physicals = int(fromfile(f, c_ulong, 1)[0])
            physical_tags[d][tag] = list(fromfile(f, c_int, num_physicals))
            if d > 0:  # discard tagBREP{Vert,Curve,Surfaces}
                num_BREP = int(fromfile(f, c_ulong, 1)[0])
                fromfile(f, c_int, num_BREP)
    _fast_forward_to_end_block(f, "Entities")
    return physical_tags
def _read_nodes(f, is_ascii):
    """Read the msh4.0 $Nodes section; returns (points, node_tags)."""
    if is_ascii:
        # first line: numEntityBlocks(unsigned long) numNodes(unsigned long)
        line = f.readline().decode()
        num_entity_blocks, total_num_nodes = (int(k) for k in line.split())
        points = np.empty((total_num_nodes, 3), dtype=float)
        tags = np.empty(total_num_nodes, dtype=int)
        idx = 0
        for _ in range(num_entity_blocks):
            # first line in the entity block:
            # tagEntity(int) dimEntity(int) typeNode(int) numNodes(unsigned long)
            line = f.readline().decode()
            _, _, _, num_nodes = map(int, line.split())
            for _ in range(num_nodes):
                # tag(int) x(double) y(double) z(double)
                line = f.readline().decode()
                tag, x, y, z = line.split()
                points[idx] = [float(x), float(y), float(z)]
                tags[idx] = tag
                idx += 1
    else:
        # numEntityBlocks(unsigned long) numNodes(unsigned long)
        num_entity_blocks, _ = np.fromfile(f, count=2, dtype=c_ulong)
        points = []
        tags = []
        for _ in range(num_entity_blocks):
            # tagEntity(int) dimEntity(int) typeNode(int) numNodes(unsigned long)
            np.fromfile(f, count=3, dtype=c_int)
            num_nodes = np.fromfile(f, count=1, dtype=c_ulong)[0]
            dtype = [("tag", c_int), ("x", c_double, (3,))]
            data = np.fromfile(f, count=num_nodes, dtype=dtype)
            tags.append(data["tag"])
            points.append(data["x"])
        tags = np.concatenate(tags)
        points = np.concatenate(points)
        # binary blocks are terminated by a newline
        line = f.readline().decode()
        if line != "\n":
            raise ReadError()
    _fast_forward_to_end_block(f, "Nodes")
    return points, tags
def _read_elements(f, point_tags, physical_tags, is_ascii):
    """Read the msh4.0 $Elements section.

    Returns ``(cells, cell_data)`` with node tags remapped to 0-based point
    indices and gmsh:physical/gmsh:geometrical tags as cell_data.
    """
    fromfile = partial(np.fromfile, sep=" " if is_ascii else "")
    # numEntityBlocks(unsigned long) numElements(unsigned long)
    num_entity_blocks, _ = fromfile(f, c_ulong, 2)
    data = []
    for _ in range(num_entity_blocks):
        # tagEntity(int) dimEntity(int) typeEle(int) numElements(unsigned long)
        tag_entity, dim_entity, type_ele = fromfile(f, c_int, 3)
        (num_ele,) = fromfile(f, c_ulong, 1)
        tpe = _gmsh_to_meshio_type[type_ele]
        num_nodes_per_ele = num_nodes_per_cell[tpe]
        d = fromfile(f, c_int, int(num_ele * (1 + num_nodes_per_ele))).reshape(
            (num_ele, -1)
        )
        if physical_tags is None:
            data.append((None, tag_entity, tpe, d))
        else:
            data.append((physical_tags[dim_entity][tag_entity], tag_entity, tpe, d))
    _fast_forward_to_end_block(f, "Elements")
    # The msh4 elements array refers to the nodes by their tag, not the index. All other
    # mesh formats use the index, which is far more efficient, too. Hence,
    # unfortunately, we have to do a fairly expensive conversion here.
    m = np.max(point_tags + 1)
    itags = -np.ones(m, dtype=int)
    itags[point_tags] = np.arange(len(point_tags))
    # Note that the first column in the data array is the element tag; discard it.
    data = [
        (physical_tag, geom_tag, tpe, itags[d[:, 1:]])
        for physical_tag, geom_tag, tpe, d in data
    ]
    cells = []
    cell_data = {}
    for physical_tag, geom_tag, key, values in data:
        cells.append(CellBlock(key, _gmsh_to_meshio_order(key, values)))
        if physical_tag:
            if "gmsh:physical" not in cell_data:
                cell_data["gmsh:physical"] = []
            cell_data["gmsh:physical"].append(
                np.full(len(values), physical_tag[0], int)
            )
        if "gmsh:geometrical" not in cell_data:
            cell_data["gmsh:geometrical"] = []
        cell_data["gmsh:geometrical"].append(np.full(len(values), geom_tag, int))
    return cells, cell_data
def _read_periodic(f, is_ascii):
    """Read the msh4.0 $Periodic section.

    Returns a list of ``[edim, (slave_tag, master_tag), affine, slave_master]``
    entries with 0-based node pairs; `affine` is None if no transform given.
    """
    fromfile = partial(np.fromfile, sep=" " if is_ascii else "")
    periodic = []
    num_periodic = int(fromfile(f, c_int, 1)[0])
    for _ in range(num_periodic):
        edim, stag, mtag = fromfile(f, c_int, 3)
        if is_ascii:
            line = f.readline().decode().strip()
            if line.startswith("Affine"):
                affine = line.replace("Affine", "", 1)
                affine = np.fromstring(affine, float, sep=" ")
                num_nodes = int(f.readline().decode())
            else:
                affine = None
                num_nodes = int(line)
        else:
            # In binary, a negative node count signals an affine transform.
            num_nodes = int(fromfile(f, c_long, 1)[0])
            if num_nodes < 0:
                affine = fromfile(f, c_double, 16)
                num_nodes = int(fromfile(f, c_ulong, 1)[0])
            else:
                affine = None
        slave_master = fromfile(f, c_int, num_nodes * 2).reshape(-1, 2)
        slave_master = slave_master - 1  # Subtract one, Python is 0-based
        periodic.append([edim, (stag, mtag), affine, slave_master])
    _fast_forward_to_end_block(f, "Periodic")
    return periodic
def write(filename, mesh: Mesh, float_fmt: str = ".16e", binary: bool = True) -> None:
    """Write a Gmsh MSH 4.0 file.

    Sections are emitted in the order required by the format:
    $MeshFormat, $PhysicalNames, $Nodes, $Elements, $Periodic, then data.
    """
    with open(filename, "wb") as fh:
        # header: version, file type (0 ascii / 1 binary), data size
        mode_idx = 1 if binary else 0
        size_of_double = 8
        fh.write(f"$MeshFormat\n4.0 {mode_idx} {size_of_double}\n".encode())
        if binary:
            # endianness-check integer required by the binary format
            np.array([1], dtype=c_int).tofile(fh)
            fh.write(b"\n")
        fh.write(b"$EndMeshFormat\n")
        if mesh.field_data:
            _write_physical_names(fh, mesh.field_data)
        _write_nodes(fh, mesh.points, float_fmt, binary)
        _write_elements(fh, mesh.cells, binary)
        if mesh.gmsh_periodic is not None:
            _write_periodic(fh, mesh.gmsh_periodic, float_fmt, binary)
        for name, dat in mesh.point_data.items():
            _write_data(fh, "NodeData", name, dat, binary)
        cell_data_raw = raw_from_cell_data(mesh.cell_data)
        for name, dat in cell_data_raw.items():
            _write_data(fh, "ElementData", name, dat, binary)
def _write_nodes(fh, points: np.ndarray, float_fmt: str, binary: bool) -> None:
if points.shape[1] == 2:
points = np.column_stack([points, np.zeros_like(points[:, 0])])
fh.write(b"$Nodes\n")
# TODO not sure what dimEntity is supposed to say
dim_entity = 0
type_node = 0
if binary:
# write all points as one big block
# numEntityBlocks(unsigned long) numNodes(unsigned long)
# tagEntity(int) dimEntity(int) typeNode(int) numNodes(unsigned long)
# tag(int) x(double) y(double) z(double)
np.array([1, points.shape[0]], dtype=c_ulong).tofile(fh)
np.array([1, dim_entity, type_node], dtype=c_int).tofile(fh)
np.array([points.shape[0]], dtype=c_ulong).tofile(fh)
dtype = [("index", c_int), ("x", c_double, (3,))]
tmp = np.empty(len(points), dtype=dtype)
tmp["index"] = 1 + np.arange(len(points))
tmp["x"] = points
tmp.tofile(fh)
fh.write(b"\n")
else:
# write all points as one big block
# numEntityBlocks(unsigned long) numNodes(unsigned long)
fh.write(f"{1} {len(points)}\n".encode())
# tagEntity(int) dimEntity(int) typeNode(int) numNodes(unsigned long)
fh.write(f"{1} {dim_entity} {type_node} {len(points)}\n".encode())
fmt = "{} " + " ".join(3 * ["{:" + float_fmt + "}"]) + "\n"
for k, x in enumerate(points):
# tag(int) x(double) y(double) z(double)
fh.write(fmt.format(k + 1, x[0], x[1], x[2]).encode())
fh.write(b"$EndNodes\n")
def _write_elements(fh, cell_blocks: list[CellBlock], binary: bool):
    """Write the msh4.0 $Elements section, one entity block per cell block
    (all with entity tag 1); connectivity is converted to 1-based."""
    # TODO respect binary
    # write elements
    fh.write(b"$Elements\n")
    if binary:
        total_num_cells = sum(len(cell_block) for cell_block in cell_blocks)
        np.array([len(cell_blocks), total_num_cells], dtype=c_ulong).tofile(fh)
        consecutive_index = 0
        for cell_block in cell_blocks:
            node_idcs = _meshio_to_gmsh_order(cell_block.type, cell_block.data)
            # tagEntity(int) dimEntity(int) typeEle(int) numElements(unsigned long)
            np.array(
                [1, cell_block.dim, _meshio_to_gmsh_type[cell_block.type]],
                dtype=c_int,
            ).tofile(fh)
            np.array([node_idcs.shape[0]], dtype=c_ulong).tofile(fh)
            if node_idcs.dtype != c_int:
                # Binary Gmsh needs c_int (typically np.int32) integers Converting.
                node_idcs = node_idcs.astype(c_int)
            data = np.column_stack(
                [
                    np.arange(
                        consecutive_index,
                        consecutive_index + len(node_idcs),
                        dtype=c_int,
                    ),
                    # increment indices by one to conform with gmsh standard
                    node_idcs + 1,
                ]
            )
            data.tofile(fh)
            consecutive_index += len(node_idcs)
        fh.write(b"\n")
    else:
        # count all cells
        total_num_cells = sum(len(cell_block) for cell_block in cell_blocks)
        fh.write(f"{len(cell_blocks)} {total_num_cells}\n".encode())
        consecutive_index = 0
        for cell_block in cell_blocks:
            cell_type = cell_block.type
            cell_data = _meshio_to_gmsh_order(cell_block.type, cell_block.data)
            # tagEntity(int) dimEntity(int) typeEle(int) numElements(unsigned long)
            fh.write(
                "{} {} {} {}\n".format(
                    1,  # tag
                    cell_block.dim,
                    _meshio_to_gmsh_type[cell_type],
                    len(cell_data),
                ).encode()
            )
            # increment indices by one to conform with gmsh standard
            idcs = cell_data + 1
            fmt = " ".join(["{}"] * (num_nodes_per_cell[cell_type] + 1)) + "\n"
            for idx in idcs:
                fh.write(fmt.format(consecutive_index, *idx).encode())
                consecutive_index += 1
    fh.write(b"$EndElements\n")
def _write_periodic(fh, periodic, float_fmt, binary):
    """Write the msh4.0 $Periodic section; node pairs are converted back to
    Gmsh's 1-based numbering."""

    def tofile(fh, value, dtype, **kwargs):
        # Helper that writes `value` raw in binary mode or via savetxt in
        # ASCII mode (floats use float_fmt, integers "%d").
        ary = np.array(value, dtype=dtype)
        if binary:
            ary.tofile(fh)
        else:
            ary = np.atleast_2d(ary)
            fmt = float_fmt if dtype == c_double else "d"
            fmt = "%" + kwargs.pop("fmt", fmt)
            np.savetxt(fh, ary, fmt=fmt, **kwargs)

    fh.write(b"$Periodic\n")
    tofile(fh, len(periodic), c_int)
    for dim, (stag, mtag), affine, slave_master in periodic:
        tofile(fh, [dim, stag, mtag], c_int)
        if affine is not None and len(affine) > 0:
            # A -1 node count announces the 16-entry affine transform.
            tofile(fh, -1, c_long)
            tofile(fh, affine, c_double, fmt=float_fmt)
        slave_master = np.array(slave_master, dtype=c_int)
        slave_master = slave_master.reshape(-1, 2)
        slave_master = slave_master + 1  # Add one, Gmsh is 1-based
        tofile(fh, len(slave_master), c_int)
        tofile(fh, slave_master, c_int)
    if binary:
        fh.write(b"\n")
    fh.write(b"$EndPeriodic\n")
src/meshio/gmsh/_gmsh41.py 0000664 0000000 0000000 00000070576 14562440725 0015733 0 ustar 00root root 0000000 0000000 """
I/O for Gmsh's msh format (version 4.1, as used by Gmsh 4.2.2+), cf.
.
"""
from functools import partial
import numpy as np
from .._common import cell_data_from_raw, num_nodes_per_cell, raw_from_cell_data, warn
from .._exceptions import ReadError, WriteError
from .._mesh import CellBlock, Mesh
from .common import (
_fast_forward_over_blank_lines,
_fast_forward_to_end_block,
_gmsh_to_meshio_order,
_gmsh_to_meshio_type,
_meshio_to_gmsh_order,
_meshio_to_gmsh_type,
_read_data,
_read_physical_names,
_write_data,
_write_physical_names,
)
c_int = np.dtype("i")
c_size_t = np.dtype("P")
c_double = np.dtype("d")
def _size_type(data_size):
return np.dtype(f"u{data_size}")
def read_buffer(f, is_ascii: bool, data_size):
    """Read a Gmsh MSH 4.1 mesh from an open (binary-mode) file object.

    Dispatches on the ``$Section`` markers; unrecognized sections are
    skipped. Raises ReadError if no $Elements section is present.
    """
    # Initialize the optional data fields
    points = []
    cells = None
    field_data = {}
    cell_data_raw = {}
    cell_tags = {}
    point_data = {}
    physical_tags = None
    bounding_entities = None
    cell_sets = {}
    periodic = None
    while True:
        # fast-forward over blank lines
        line, is_eof = _fast_forward_over_blank_lines(f)
        if is_eof:
            break
        if line[0] != "$":
            raise ReadError(f"Unexpected line {repr(line)}")
        environ = line[1:].strip()
        if environ == "PhysicalNames":
            _read_physical_names(f, field_data)
        elif environ == "Entities":
            # Read physical tags and information on bounding entities.
            # The information is passed to the processing of elements.
            physical_tags, bounding_entities = _read_entities(f, is_ascii, data_size)
        elif environ == "Nodes":
            points, point_tags, point_entities = _read_nodes(f, is_ascii, data_size)
        elif environ == "Elements":
            cells, cell_tags, cell_sets = _read_elements(
                f,
                point_tags,
                physical_tags,
                bounding_entities,
                is_ascii,
                data_size,
                field_data,
            )
        elif environ == "Periodic":
            periodic = _read_periodic(f, is_ascii, data_size)
        elif environ == "NodeData":
            _read_data(f, "NodeData", point_data, data_size, is_ascii)
        elif environ == "ElementData":
            _read_data(f, "ElementData", cell_data_raw, data_size, is_ascii)
        else:
            # Per the Gmsh reference, any section with an unrecognized header
            # is simply ignored (e.g. $Comments/$EndComments).
            # skip environment
            _fast_forward_to_end_block(f, environ)
    if cells is None:
        raise ReadError("$Element section not found.")
    cell_data = cell_data_from_raw(cells, cell_data_raw)
    cell_data.update(cell_tags)
    # Add node entity information to the point data
    point_data.update({"gmsh:dim_tags": point_entities})
    return Mesh(
        points,
        cells,
        point_data=point_data,
        cell_data=cell_data,
        field_data=field_data,
        cell_sets=cell_sets,
        gmsh_periodic=periodic,
    )
def _read_entities(f, is_ascii: bool, data_size):
    """Read the $Entities section.

    Returns (physical_tags, bounding_entities): two 4-tuples of dicts, one
    per dimension (0=point, 1=curve, 2=surface, 3=volume), keyed by entity
    tag.
    """
    # Read the entity section. Return physical tags of the entities, and (for
    # entities of dimension > 0) the bounding entities (so points that form
    # the boundary of a line etc).
    # Note that the bounding box of the entities is disregarded. Adding this
    # is not difficult, but for the moment, the entropy of adding more data
    # does not seem warranted.
    fromfile = partial(np.fromfile, sep=" " if is_ascii else "")
    c_size_t = _size_type(data_size)
    physical_tags = ({}, {}, {}, {})
    bounding_entities = ({}, {}, {}, {})
    number = fromfile(f, c_size_t, 4)  # dims 0, 1, 2, 3
    for d, n in enumerate(number):
        for _ in range(n):
            (tag,) = fromfile(f, c_int, 1)
            # Bounding box: 3 coordinates for points, 6 (min/max) otherwise.
            fromfile(f, c_double, 3 if d == 0 else 6)  # discard bounding-box
            (num_physicals,) = fromfile(f, c_size_t, 1)
            physical_tags[d][tag] = list(fromfile(f, c_int, num_physicals))
            if d > 0:
                # Number of bounding entities
                num_BREP_ = fromfile(f, c_size_t, 1)[0]
                # Store bounding entities
                bounding_entities[d][tag] = fromfile(f, c_int, num_BREP_)
    _fast_forward_to_end_block(f, "Entities")
    return physical_tags, bounding_entities
def _read_nodes(f, is_ascii: bool, data_size):
    """Read the $Nodes section.

    Returns (points, tags, dim_tags): the (n, 3) coordinate array, the
    0-based node tags, and an (n, 2) array with (entity dimension,
    entity tag) per node.
    """
    # Read node data: Node coordinates and tags.
    # Also find the entities of the nodes, and store this as point_data.
    # Note that entity tags are 1-offset within each dimension, thus it is
    # necessary to keep track of both tag and dimension of the entity
    fromfile = partial(np.fromfile, sep=" " if is_ascii else "")
    c_size_t = _size_type(data_size)
    # numEntityBlocks numNodes minNodeTag maxNodeTag (all size_t)
    num_entity_blocks, total_num_nodes, _, _ = fromfile(f, c_size_t, 4)
    points = np.empty((total_num_nodes, 3), dtype=float)
    tags = np.empty(total_num_nodes, dtype=int)
    dim_tags = np.empty((total_num_nodes, 2), dtype=int)
    # To save the entity block id for each node, initialize an array here,
    # populate it with num_nodes
    idx = 0
    for _ in range(num_entity_blocks):
        # entityDim(int) entityTag(int) parametric(int) numNodes(size_t)
        dim, entity_tag, parametric = fromfile(f, c_int, 3)
        if parametric != 0:
            raise ReadError("parametric nodes not implemented")
        num_nodes = int(fromfile(f, c_size_t, 1)[0])
        # From :
        # > [...] tags can be "sparse", i.e., do not have to constitute a continuous
        # > list of numbers (the format even allows them to not be ordered).
        #
        # Following https://github.com/nschloe/meshio/issues/388, we read the tags and
        # populate the points array accordingly, thereby preserving the order of indices
        # of nodes/points.
        ixx = slice(idx, idx + num_nodes)
        tags[ixx] = fromfile(f, c_size_t, num_nodes) - 1
        # Store the point densely and in the order in which they appear in the file.
        # x(double) y(double) z(double) (* numNodes)
        points[ixx] = fromfile(f, c_double, num_nodes * 3).reshape((num_nodes, 3))
        # Entity tag and entity dimension of the nodes. Stored as point-data.
        dim_tags[ixx, 0] = dim
        dim_tags[ixx, 1] = entity_tag
        idx += num_nodes
    _fast_forward_to_end_block(f, "Nodes")
    return points, tags, dim_tags
def _read_elements(
    f, point_tags, physical_tags, bounding_entities, is_ascii, data_size, field_data
):
    """Read the $Elements section.

    Returns (cells, cell_data, cell_sets). Node references are remapped from
    (possibly sparse) Gmsh node tags to dense 0-based point indices via
    *point_tags*.
    """
    fromfile = partial(np.fromfile, sep=" " if is_ascii else "")
    c_size_t = _size_type(data_size)
    # numEntityBlocks numElements minElementTag maxElementTag (all size_t)
    num_entity_blocks, _, _, _ = fromfile(f, c_size_t, 4)
    data = []
    cell_data = {}
    # One (initially empty) slot per entity block for every physical name.
    cell_sets = {k: [None] * num_entity_blocks for k in field_data.keys()}
    for k in range(num_entity_blocks):
        # entityDim(int) entityTag(int) elementType(int) numElements(size_t)
        dim, tag, type_ele = fromfile(f, c_int, 3)
        (num_ele,) = fromfile(f, c_size_t, 1)
        # Block k belongs to a physical group iff the group's dimension matches
        # and its tag is among this entity's physical tags; then all num_ele
        # elements of the block are members, otherwise none.
        for physical_name, cell_set in cell_sets.items():
            cell_set[k] = np.arange(
                (
                    num_ele
                    if (
                        physical_tags
                        and field_data[physical_name][1] == dim
                        and field_data[physical_name][0] in physical_tags[dim][tag]
                    )
                    else 0
                ),
                dtype=type(num_ele),
            )
        tpe = _gmsh_to_meshio_type[type_ele]
        num_nodes_per_ele = num_nodes_per_cell[tpe]
        # Each row: elementTag followed by the node tags of the element.
        d = fromfile(f, c_size_t, int(num_ele * (1 + num_nodes_per_ele))).reshape(
            (num_ele, -1)
        )
        # Find physical tag, if defined; else it is None.
        pt = None if not physical_tags else physical_tags[dim][tag]
        # Bounding entities (of lower dimension) if defined. Else it is None.
        if dim > 0 and bounding_entities:  # Points have no boundaries
            be = bounding_entities[dim][tag]
        else:
            be = None
        data.append((pt, be, tag, tpe, d))
    _fast_forward_to_end_block(f, "Elements")
    # Inverse point tags: maps a 0-based node tag back to its dense point index.
    inv_tags = np.full(np.max(point_tags) + 1, -1, dtype=int)
    inv_tags[point_tags] = np.arange(len(point_tags))
    # Note that the first column in the data array is the element tag; discard it.
    data = [
        (physical_tag, bound_entity, geom_tag, tpe, inv_tags[d[:, 1:] - 1])
        for physical_tag, bound_entity, geom_tag, tpe, d in data
    ]
    cells = []
    for physical_tag, bound_entity, geom_tag, key, values in data:
        cells.append(CellBlock(key, _gmsh_to_meshio_order(key, values)))
        if physical_tag:
            if "gmsh:physical" not in cell_data:
                cell_data["gmsh:physical"] = []
            cell_data["gmsh:physical"].append(
                np.full(len(values), physical_tag[0], int)
            )
        if "gmsh:geometrical" not in cell_data:
            cell_data["gmsh:geometrical"] = []
        cell_data["gmsh:geometrical"].append(np.full(len(values), geom_tag, int))
        # The bounding entities is stored in the cell_sets.
        if bounding_entities:
            if "gmsh:bounding_entities" not in cell_sets:
                cell_sets["gmsh:bounding_entities"] = []
            cell_sets["gmsh:bounding_entities"].append(bound_entity)
    return cells, cell_data, cell_sets
def _read_periodic(f, is_ascii, data_size):
    """Read the $Periodic section.

    Returns a list of [dim, (slave_tag, master_tag), affine, slave_master]
    entries; node pairs are converted to 0-based indices.
    """
    fromfile = partial(np.fromfile, sep=" " if is_ascii else "")
    c_size_t = _size_type(data_size)
    periodic = []
    # numPeriodicLinks(size_t)
    num_periodic = int(fromfile(f, c_size_t, 1)[0])
    for _ in range(num_periodic):
        # entityDim(int) entityTag(int) entityTagMaster(int)
        edim, stag, mtag = fromfile(f, c_int, 3)
        # numAffine(size_t) value(double) ...
        num_affine = int(fromfile(f, c_size_t, 1)[0])
        affine = fromfile(f, c_double, num_affine)
        # numCorrespondingNodes(size_t)
        num_nodes = int(fromfile(f, c_size_t, 1)[0])
        # nodeTag(size_t) nodeTagMaster(size_t) ...
        slave_master = fromfile(f, c_size_t, num_nodes * 2).reshape(-1, 2)
        slave_master = slave_master - 1  # Subtract one, Python is 0-based
        periodic.append([edim, (stag, mtag), affine, slave_master])
    _fast_forward_to_end_block(f, "Periodic")
    return periodic
def write(filename, mesh, float_fmt=".16e", binary=True):
    """Writes msh files, cf.
    .
    """
    # gmsh:dim_tags is entity bookkeeping, not actual point data; drop it.
    point_data = {
        key: value
        for key, value in mesh.point_data.items()
        if key not in ["gmsh:dim_tags"]
    }
    # Split the cell data: gmsh:physical and gmsh:geometrical are tags, the
    # rest is actual cell data.
    tag_keys = ["gmsh:physical", "gmsh:geometrical", "cell_tags"]
    tag_data = {key: val for key, val in mesh.cell_data.items() if key in tag_keys}
    cell_data = {
        key: val for key, val in mesh.cell_data.items() if key not in tag_keys
    }
    with open(filename, "wb") as fh:
        mode_flag = 1 if binary else 0
        size_t_bytes = c_size_t.itemsize
        fh.write(b"$MeshFormat\n")
        fh.write(f"4.1 {mode_flag} {size_t_bytes}\n".encode())
        if binary:
            # Endianness-detection integer required by the binary format.
            np.array([1], dtype=c_int).tofile(fh)
            fh.write(b"\n")
        fh.write(b"$EndMeshFormat\n")
        if mesh.field_data:
            _write_physical_names(fh, mesh.field_data)
        _write_entities(
            fh, mesh.cells, tag_data, mesh.cell_sets, mesh.point_data, binary
        )
        _write_nodes(fh, mesh.points, mesh.cells, mesh.point_data, float_fmt, binary)
        _write_elements(fh, mesh.cells, tag_data, binary)
        if mesh.gmsh_periodic is not None:
            _write_periodic(fh, mesh.gmsh_periodic, float_fmt, binary)
        for name, values in point_data.items():
            _write_data(fh, "NodeData", name, values, binary)
        for name, values in raw_from_cell_data(cell_data).items():
            _write_data(fh, "ElementData", name, values, binary)
def _write_entities(fh, cells, tag_data, cell_sets, point_data, binary):
    """Write entity section in a .msh file.
    The entity section links up to three kinds of information:
    1) The geometric objects represented in the mesh.
    2) Physical tags of geometric objects. This data will be a subset
    of that represented in 1)
    3) Which geometric objects form the boundary of this object.
    The boundary is formed of objects with dimension 1 less than
    the current one. A boundary can only be specified for objects of
    dimension at least 1.
    The entities of all geometric objects is pulled from
    point_data['gmsh:dim_tags']. For details, see the function _write_nodes().
    Physical tags are specified as tag_data, while the boundary of a geometric
    object is specified in cell_sets.
    """
    # The data format for the entities section is
    #
    # numPoints(size_t) numCurves(size_t)
    # numSurfaces(size_t) numVolumes(size_t)
    # pointTag(int) X(double) Y(double) Z(double)
    # numPhysicalTags(size_t) physicalTag(int) ...
    # ...
    # curveTag(int) minX(double) minY(double) minZ(double)
    # maxX(double) maxY(double) maxZ(double)
    # numPhysicalTags(size_t) physicalTag(int) ...
    # numBoundingPoints(size_t) pointTag(int) ...
    # ...
    # surfaceTag(int) minX(double) minY(double) minZ(double)
    # maxX(double) maxY(double) maxZ(double)
    # numPhysicalTags(size_t) physicalTag(int) ...
    # numBoundingCurves(size_t) curveTag(int) ...
    # ...
    # volumeTag(int) minX(double) minY(double) minZ(double)
    # maxX(double) maxY(double) maxZ(double)
    # numPhysicalTags(size_t) physicalTag(int) ...
    # numBoundingSurfaces(size_t) surfaceTag(int) ...
    # Both nodes and cells have entities, but the cell entities are a subset of
    # the nodes. The reason is (if the inner workings of Gmsh has been correctly
    # understood) that node entities are assigned to all
    # objects necessary to specify the geometry whereas only cells of Physical
    # objects (gmsh jargon) are present among the cell entities.
    # The entities section must therefore be built on the node-entities, if
    # these are available. If this is not the case, we leave this section blank.
    # TODO: Should this give a warning?
    if "gmsh:dim_tags" not in point_data:
        return
    fh.write(b"$Entities\n")
    # Array of entity tag (first row) and dimension (second row) per node.
    # We need to combine the two, since entity tags are reset for each dimension.
    # Uniquify, so that each row in node_dim_tags represent a unique entity
    node_dim_tags = np.unique(point_data["gmsh:dim_tags"], axis=0)
    # Write number of entities per dimension
    num_occ = np.bincount(node_dim_tags[:, 0], minlength=4)
    if num_occ.size > 4:
        raise ValueError("Encountered entity with dimension > 3")
    if binary:
        num_occ.astype(c_size_t).tofile(fh)
    else:
        fh.write(f"{num_occ[0]} {num_occ[1]} {num_occ[2]} {num_occ[3]}\n".encode())
    # Array of dimension and entity tag per cell. Will be compared with the
    # similar node array.
    cell_dim_tags = np.empty((len(cells), 2), dtype=int)
    for ci, cell_block in enumerate(cells):
        cell_dim_tags[ci] = [
            cell_block.dim,
            tag_data["gmsh:geometrical"][ci][0],
        ]
    # We will only deal with bounding entities if this information is available
    has_bounding_elements = "gmsh:bounding_entities" in cell_sets
    # The node entities form a superset of cell entities. Write entity information
    # based on nodes, supplement with cell information when there is a matching
    # cell block.
    for dim, tag in node_dim_tags:
        # Find the matching cell block, if it exists
        matching_cell_block = np.where(
            np.logical_and(cell_dim_tags[:, 0] == dim, cell_dim_tags[:, 1] == tag)
        )[0]
        if matching_cell_block.size > 1:
            # It is not 100% clear if this is not permissible, but the current
            # implementation for sure does not allow it.
            raise ValueError("Encountered non-unique CellBlock dim_tag")
        # The information to be written varies according to entity dimension,
        # whether entity has a physical tag, and between ascii and binary.
        # The resulting code is a bit ugly, but no simpler and clean option
        # seems possible.
        # Entity tag
        if binary:
            np.array([tag], dtype=c_int).tofile(fh)
        else:
            fh.write(f"{tag} ".encode())
        # Min-max coordinates for the entity. For now, simply put zeros here,
        # and hope that gmsh does not complain. To expand this, the point
        # coordinates must be made available to this function; the bounding
        # box can then be found by a min-max over the points of the matching
        # cell.
        if dim == 0:
            # Bounding box is a point
            if binary:
                np.zeros(3, dtype=c_double).tofile(fh)
            else:
                fh.write(b"0 0 0 ")
        else:
            # Bounding box has six coordinates
            if binary:
                np.zeros(6, dtype=c_double).tofile(fh)
            else:
                fh.write(b"0 0 0 0 0 0 ")
        # If there is a corresponding cell block, write physical tags (if any)
        # and bounding entities (if any)
        if matching_cell_block.size > 0:
            # entity has a physical tag, write this
            # ASSUMPTION: There is a single physical tag for this
            physical_tag = tag_data["gmsh:physical"][matching_cell_block[0]][0]
            if binary:
                np.array([1], dtype=c_size_t).tofile(fh)
                np.array([physical_tag], dtype=c_int).tofile(fh)
            else:
                fh.write(f"1 {physical_tag} ".encode())
        else:
            # The number of physical tags is zero
            if binary:
                np.array([0], dtype=c_size_t).tofile(fh)
            else:
                fh.write(b"0 ")
        if dim > 0:
            # Entities not of the lowest dimension can have their
            # bounding elements (of dimension one less) specified
            if has_bounding_elements and matching_cell_block.size > 0:
                # The bounding element should be a list
                bounds = cell_sets["gmsh:bounding_entities"][matching_cell_block[0]]
                num_bounds = len(bounds)
                if num_bounds > 0:
                    if binary:
                        np.array(num_bounds, dtype=c_size_t).tofile(fh)
                        np.array(bounds, dtype=c_int).tofile(fh)
                    else:
                        fh.write(f"{num_bounds} ".encode())
                        for bi in bounds:
                            fh.write(f"{bi} ".encode())
                        fh.write(b"\n")
                else:
                    # Register that there are no bounding elements
                    if binary:
                        np.array([0], dtype=c_size_t).tofile(fh)
                    else:
                        fh.write(b"0\n")
            else:
                # Register that there are no bounding elements
                if binary:
                    np.array([0], dtype=c_size_t).tofile(fh)
                else:
                    fh.write(b"0\n")
        else:
            # If ascii, enforce line change
            if not binary:
                fh.write(b"\n")
    if binary:
        fh.write(b"\n")
    fh.write(b"$EndEntities\n")
def _write_nodes(fh, points, cells, point_data, float_fmt, binary):
    """Write node information.
    If data on dimension and tags of the geometric entities which the nodes belong to
    is available, the nodes will be grouped accordingly. This data is
    specified as point_data, using the key 'gmsh:dim_tags' and data as an
    num_points x 2 numpy array (first column is the dimension of the geometric entity
    of this node, second is the tag).
    If dim_tags are not available, all nodes will be assigned the same tag of 0. This
    only makes sense if a single cell block is present in the mesh; an error will be
    raised if len(cells) > 1.
    """
    if points.shape[1] == 2:
        # msh4 requires 3D points, but 2D points given.
        # Appending 0 third component.
        points = np.column_stack([points, np.zeros_like(points[:, 0])])
    fh.write(b"$Nodes\n")
    # The format for the nodes section is
    #
    # $Nodes
    # numEntityBlocks(size_t) numNodes(size_t) minNodeTag(size_t) maxNodeTag(size_t)
    # entityDim(int) entityTag(int) parametric(int; 0 or 1)
    # numNodesInBlock(size_t)
    # nodeTag(size_t)
    # ...
    # x(double) y(double) z(double)
    # < u(double; if parametric and entityDim >= 1) >
    # < v(double; if parametric and entityDim >= 2) >
    # < w(double; if parametric and entityDim == 3) >
    # ...
    # ...
    # $EndNodes
    #
    n = points.shape[0]
    min_tag = 1
    max_tag = n
    is_parametric = 0
    # If node (entity) tag and dimension is available, we make a list of unique
    # combinations thereof, and a map from the full node set to the unique
    # set.
    if "gmsh:dim_tags" in point_data:
        # reverse_index_map maps from all nodes to their respective representation in
        # (the uniquified) node_dim_tags. This approach works for general orderings of
        # the nodes
        node_dim_tags, reverse_index_map = np.unique(
            point_data["gmsh:dim_tags"],
            axis=0,
            return_inverse=True,
        )
    else:
        # If entity information is not provided, we will assign the same entity for all
        # nodes. This only makes sense if the cells are of a single type
        if len(cells) != 1:
            raise WriteError(
                "Specify entity information (gmsh:dim_tags in point_data) "
                + "to deal with more than one cell type. "
            )
        dim = cells[0].dim
        tag = 0
        node_dim_tags = np.array([[dim, tag]])
        # All nodes map to the (single) dimension-entity object
        reverse_index_map = np.full(n, 0, dtype=int)
    num_blocks = node_dim_tags.shape[0]
    # First write preamble
    if binary:
        if points.dtype != c_double:
            warn(f"Binary Gmsh needs c_double points (got {points.dtype}). Converting.")
            points = points.astype(c_double)
        np.array([num_blocks, n, min_tag, max_tag], dtype=c_size_t).tofile(fh)
    else:
        fh.write(f"{num_blocks} {n} {min_tag} {max_tag}\n".encode())
    # One block per unique (dim, tag) pair; node tags are written 1-based.
    for j in range(num_blocks):
        dim, tag = node_dim_tags[j]
        node_tags = np.where(reverse_index_map == j)[0]
        num_points_this = node_tags.size
        if binary:
            np.array([dim, tag, is_parametric], dtype=c_int).tofile(fh)
            np.array([num_points_this], dtype=c_size_t).tofile(fh)
            (node_tags + 1).astype(c_size_t).tofile(fh)
            points[node_tags].tofile(fh)
        else:
            fh.write(f"{dim} {tag} {is_parametric} {num_points_this}\n".encode())
            (node_tags + 1).astype(c_size_t).tofile(fh, "\n", "%d")
            fh.write(b"\n")
            np.savetxt(fh, points[node_tags], delimiter=" ", fmt="%" + float_fmt)
    if binary:
        fh.write(b"\n")
    fh.write(b"$EndNodes\n")
def _write_elements(fh, cells, tag_data, binary: bool) -> None:
    """write the $Elements block
    $Elements
    numEntityBlocks(size_t)
    numElements(size_t) minElementTag(size_t) maxElementTag(size_t)
    entityDim(int) entityTag(int) elementType(int; see below) numElementsInBlock(size_t)
    elementTag(size_t) nodeTag(size_t) ...
    ...
    ...
    $EndElements
    """
    fh.write(b"$Elements\n")
    total_num_cells = sum(len(c) for c in cells)
    num_blocks = len(cells)
    min_element_tag = 1
    max_element_tag = total_num_cells
    if binary:
        np.array(
            [num_blocks, total_num_cells, min_element_tag, max_element_tag],
            dtype=c_size_t,
        ).tofile(fh)
        tag0 = 1
        for ci, cell_block in enumerate(cells):
            node_idcs = _meshio_to_gmsh_order(cell_block.type, cell_block.data)
            if node_idcs.dtype != c_size_t:
                # Binary Gmsh needs c_size_t; convert.
                # (A second dtype check further down used to repeat this
                # conversion with a warning, but it was unreachable after this
                # one and has been removed.)
                node_idcs = node_idcs.astype(c_size_t)
            # entityDim(int) entityTag(int) elementType(int)
            # numElementsBlock(size_t)
            # The entity tag should be equal within a CellBlock
            if "gmsh:geometrical" in tag_data:
                entity_tag = tag_data["gmsh:geometrical"][ci][0]
            else:
                entity_tag = 0
            cell_type = _meshio_to_gmsh_type[cell_block.type]
            np.array([cell_block.dim, entity_tag, cell_type], dtype=c_int).tofile(fh)
            n = node_idcs.shape[0]
            np.array([n], dtype=c_size_t).tofile(fh)
            np.column_stack(
                [
                    np.arange(tag0, tag0 + n, dtype=c_size_t),
                    # increment indices by one to conform with gmsh standard
                    node_idcs + 1,
                ]
            ).tofile(fh)
            tag0 += n
        fh.write(b"\n")
    else:
        fh.write(
            "{} {} {} {}\n".format(
                num_blocks, total_num_cells, min_element_tag, max_element_tag
            ).encode()
        )
        tag0 = 1
        for ci, cell_block in enumerate(cells):
            node_idcs = _meshio_to_gmsh_order(cell_block.type, cell_block.data)
            # entityDim(int) entityTag(int) elementType(int) numElementsBlock(size_t)
            # The entity tag should be equal within a CellBlock
            if "gmsh:geometrical" in tag_data:
                entity_tag = tag_data["gmsh:geometrical"][ci][0]
            else:
                entity_tag = 0
            cell_type = _meshio_to_gmsh_type[cell_block.type]
            n = len(cell_block.data)
            fh.write(f"{cell_block.dim} {entity_tag} {cell_type} {n}\n".encode())
            np.savetxt(
                fh,
                # Gmsh indexes from 1 not 0
                np.column_stack([np.arange(tag0, tag0 + n), node_idcs + 1]),
                "%d",
                " ",
            )
            tag0 += n
    fh.write(b"$EndElements\n")
def _write_periodic(fh, periodic, float_fmt: str, binary: bool) -> None:
    """write the $Periodic block
    specified as
    $Periodic
    numPeriodicLinks(size_t)
    entityDim(int) entityTag(int) entityTagMaster(int)
    numAffine(size_t) value(double) ...
    numCorrespondingNodes(size_t)
    nodeTag(size_t) nodeTagMaster(size_t)
    ...
    ...
    $EndPeriodic
    """
    def tofile(fh, value, dtype, **kwargs):
        # Helper: write *value* with the given dtype — raw bytes in binary
        # mode, formatted text (np.savetxt) in ascii mode. `fmt` may be
        # overridden via kwargs; doubles default to float_fmt, ints to "%d".
        ary = np.array(value, dtype=dtype)
        if binary:
            ary.tofile(fh)
        else:
            ary = np.atleast_2d(ary)
            fmt = float_fmt if dtype == c_double else "d"
            fmt = "%" + kwargs.pop("fmt", fmt)
            np.savetxt(fh, ary, fmt=fmt, **kwargs)
    fh.write(b"$Periodic\n")
    tofile(fh, len(periodic), c_size_t)
    for dim, (stag, mtag), affine, slave_master in periodic:
        tofile(fh, [dim, stag, mtag], c_int)
        if affine is None or len(affine) == 0:
            tofile(fh, 0, c_size_t)
        else:
            tofile(fh, len(affine), c_size_t, newline=" ")
            tofile(fh, affine, c_double, fmt=float_fmt)
        slave_master = np.array(slave_master, dtype=c_size_t)
        slave_master = slave_master.reshape(-1, 2)
        slave_master = slave_master + 1  # Add one, Gmsh is 1-based
        tofile(fh, len(slave_master), c_size_t)
        tofile(fh, slave_master, c_size_t)
    if binary:
        fh.write(b"\n")
    fh.write(b"$EndPeriodic\n")
src/meshio/gmsh/common.py 0000664 0000000 0000000 00000021071 14562440725 0015743 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import shlex
import numpy as np
from numpy.typing import ArrayLike
from .._common import warn
from .._exceptions import ReadError, WriteError
# Gmsh's C `int` (32-bit) and `double` (64-bit) as numpy dtypes; used for all
# binary reads and writes in this package.
c_int = np.dtype("int32")
c_double = np.dtype("float64")
def _fast_forward_to_end_block(f, block):
"""fast-forward to end of block"""
# See also https://github.com/nschloe/pygalmesh/issues/34
for line in f:
try:
line = line.decode()
except UnicodeDecodeError:
pass
if line.strip() == f"$End{block}":
break
else:
warn(f"${block} not closed by $End{block}.")
def _fast_forward_over_blank_lines(f):
is_eof = False
while True:
line = f.readline().decode()
if not line:
is_eof = True
break
elif len(line.strip()) > 0:
break
return line, is_eof
def _read_physical_names(f, field_data):
    """Read a $PhysicalNames section into *field_data* as name -> [tag, dim]."""
    num_phys_names = int(f.readline().decode())
    for _ in range(num_phys_names):
        tokens = shlex.split(f.readline().decode())
        # Each record is: dimension tag "name". Store reversed as [tag, dim].
        name = tokens[2]
        field_data[name] = np.array(tokens[1::-1], dtype=int)
    _fast_forward_to_end_block(f, "PhysicalNames")
def _read_data(f, tag, data_dict, data_size, is_ascii):
    """Read one $NodeData/$ElementData section into data_dict[name].

    *tag* is the section name (used to find the $End marker); the first
    string tag of the section becomes the key in *data_dict*.
    Note: *data_size* is accepted for signature uniformity but not used here.
    """
    # Read string tags
    num_string_tags = int(f.readline().decode())
    string_tags = [
        f.readline().decode().strip().replace('"', "") for _ in range(num_string_tags)
    ]
    # The real tags typically only contain one value, the time.
    # Discard it.
    num_real_tags = int(f.readline().decode())
    for _ in range(num_real_tags):
        f.readline()
    num_integer_tags = int(f.readline().decode())
    integer_tags = [int(f.readline().decode()) for _ in range(num_integer_tags)]
    # integer_tags[0] is the time step (ignored here).
    num_components = integer_tags[1]
    num_items = integer_tags[2]
    if is_ascii:
        data = np.fromfile(f, count=num_items * (1 + num_components), sep=" ").reshape(
            (num_items, 1 + num_components)
        )
        # The first entry is the node number
        data = data[:, 1:]
    else:
        # binary
        dtype = [("index", c_int), ("values", c_double, (num_components,))]
        data = np.fromfile(f, count=num_items, dtype=dtype)
        # Indices must be the contiguous 1-based range; anything else is
        # unsupported here.
        if not (data["index"] == range(1, num_items + 1)).all():
            raise ReadError()
        data = np.ascontiguousarray(data["values"])
    _fast_forward_to_end_block(f, tag)
    # The gmsh format cannot distinguish between data of shape (n,) and (n, 1).
    # If shape[1] == 1, cut it off.
    if data.shape[1] == 1:
        data = data[:, 0]
    data_dict[string_tags[0]] = data
# Translate meshio types to gmsh codes
# http://gmsh.info//doc/texinfo/gmsh.html#MSH-file-format-version-2
_gmsh_to_meshio_type = {
    1: "line",
    2: "triangle",
    3: "quad",
    4: "tetra",
    5: "hexahedron",
    6: "wedge",
    7: "pyramid",
    8: "line3",
    9: "triangle6",
    10: "quad9",
    11: "tetra10",
    12: "hexahedron27",
    13: "wedge18",
    14: "pyramid14",
    15: "vertex",
    16: "quad8",
    17: "hexahedron20",
    18: "wedge15",
    19: "pyramid13",
    21: "triangle10",
    23: "triangle15",
    25: "triangle21",
    26: "line4",
    27: "line5",
    28: "line6",
    29: "tetra20",
    30: "tetra35",
    31: "tetra56",
    36: "quad16",
    37: "quad25",
    38: "quad36",
    42: "triangle28",
    43: "triangle36",
    44: "triangle45",
    45: "triangle55",
    46: "triangle66",
    47: "quad49",
    48: "quad64",
    49: "quad81",
    50: "quad100",
    51: "quad121",
    62: "line7",
    63: "line8",
    64: "line9",
    65: "line10",
    66: "line11",
    71: "tetra84",
    72: "tetra120",
    73: "tetra165",
    74: "tetra220",
    75: "tetra286",
    90: "wedge40",
    91: "wedge75",
    92: "hexahedron64",
    93: "hexahedron125",
    94: "hexahedron216",
    95: "hexahedron343",
    96: "hexahedron512",
    97: "hexahedron729",
    98: "hexahedron1000",
    106: "wedge126",
    107: "wedge196",
    108: "wedge288",
    109: "wedge405",
    110: "wedge550",
}
# Inverse map: meshio cell-type name -> gmsh element-type code.
_meshio_to_gmsh_type = {v: k for k, v in _gmsh_to_meshio_type.items()}
def _gmsh_to_meshio_order(cell_type: str, idx: ArrayLike) -> np.ndarray:
# Gmsh cells are mostly ordered like VTK, with a few exceptions:
meshio_ordering = {
# fmt: off
"tetra10": [0, 1, 2, 3, 4, 5, 6, 7, 9, 8],
"hexahedron20": [
0, 1, 2, 3, 4, 5, 6, 7, 8, 11, 13,
9, 16, 18, 19, 17, 10, 12, 14, 15,
], # https://vtk.org/doc/release/4.2/html/classvtkQuadraticHexahedron.html and https://gmsh.info/doc/texinfo/gmsh.html#Node-ordering
"hexahedron27": [
0, 1, 2, 3, 4, 5, 6, 7, 8, 11, 13,
9, 16, 18, 19, 17, 10, 12, 14, 15,
22, 23, 21, 24, 20, 25, 26,
],
"wedge15": [
0, 1, 2, 3, 4, 5, 6, 9, 7, 12, 14, 13, 8, 10, 11
], # http://davis.lbl.gov/Manuals/VTK-4.5/classvtkQuadraticWedge.html and https://gmsh.info/doc/texinfo/gmsh.html#Node-ordering
"pyramid13": [0, 1, 2, 3, 4, 5, 8, 10, 6, 7, 9, 11, 12],
# fmt: on
}
idx = np.asarray(idx)
if cell_type not in meshio_ordering:
return idx
return idx[:, meshio_ordering[cell_type]]
def _meshio_to_gmsh_order(cell_type: str, idx: ArrayLike) -> np.ndarray:
# Gmsh cells are mostly ordered like VTK, with a few exceptions:
gmsh_ordering = {
# fmt: off
"tetra10": [0, 1, 2, 3, 4, 5, 6, 7, 9, 8],
"hexahedron20": [
0, 1, 2, 3, 4, 5, 6, 7, 8, 11, 16,
9, 17, 10, 18, 19, 12, 15, 13, 14,
],
"hexahedron27": [
0, 1, 2, 3, 4, 5, 6, 7, 8, 11, 16,
9, 17, 10, 18, 19, 12, 15, 13, 14,
24, 22, 20, 21, 23, 25, 26,
],
"wedge15": [
0, 1, 2, 3, 4, 5, 6, 8, 12, 7, 13, 14, 9, 11, 10,
],
"pyramid13": [0, 1, 2, 3, 4, 5, 8, 9, 6, 10, 7, 11, 12],
# fmt: on
}
idx = np.asarray(idx)
if cell_type not in gmsh_ordering:
return idx
return idx[:, gmsh_ordering[cell_type]]
def _write_physical_names(fh, field_data):
# Write physical names
entries = []
for phys_name in field_data:
try:
phys_num, phys_dim = field_data[phys_name]
phys_num, phys_dim = int(phys_num), int(phys_dim)
entries.append((phys_dim, phys_num, phys_name))
except (ValueError, TypeError):
warn("Field data contains entry that cannot be processed.")
entries.sort()
if entries:
fh.write(b"$PhysicalNames\n")
fh.write(f"{len(entries)}\n".encode())
for entry in entries:
fh.write('{} {} "{}"\n'.format(*entry).encode())
fh.write(b"$EndPhysicalNames\n")
def _write_data(fh, tag, name, data, binary):
    """Write one $NodeData/$ElementData section named *name* for array *data*.

    Raises WriteError unless the per-item component count is 1, 3, or 9 (a
    Gmsh restriction). Indices are written 1-based.
    """
    fh.write(f"${tag}\n".encode())
    # :
    # > Number of string tags.
    # > gives the number of string tags that follow. By default the first
    # > string-tag is interpreted as the name of the post-processing view and
    # > the second as the name of the interpolation scheme. The interpolation
    # > scheme is provided in the $InterpolationScheme section (see below).
    fh.write(f"{1}\n".encode())
    fh.write(f'"{name}"\n'.encode())
    # One real tag: the time value (0.0).
    fh.write(f"{1}\n".encode())
    fh.write(f"{0.0}\n".encode())
    # three integer tags:
    fh.write(f"{3}\n".encode())
    # time step
    fh.write(f"{0}\n".encode())
    # number of components
    num_components = data.shape[1] if len(data.shape) > 1 else 1
    if num_components not in [1, 3, 9]:
        raise WriteError("Gmsh only permits 1, 3, or 9 components per data field.")
    # Cut off the last dimension in case it's 1. This avoids problems with
    # writing the data.
    if len(data.shape) > 1 and data.shape[1] == 1:
        data = data[:, 0]
    fh.write(f"{num_components}\n".encode())
    # num data items
    fh.write(f"{data.shape[0]}\n".encode())
    # actually write the data
    if binary:
        if num_components == 1:
            dtype = [("index", c_int), ("data", c_double)]
        else:
            dtype = [("index", c_int), ("data", c_double, num_components)]
        tmp = np.empty(len(data), dtype=dtype)
        tmp["index"] = 1 + np.arange(len(data))
        tmp["data"] = data
        tmp.tofile(fh)
        fh.write(b"\n")
    else:
        # {!r} preserves full float precision in the ascii output.
        fmt = " ".join(["{}"] + ["{!r}"] * num_components) + "\n"
        # TODO unify
        if num_components == 1:
            for k, x in enumerate(data):
                fh.write(fmt.format(k + 1, x).encode())
        else:
            for k, x in enumerate(data):
                fh.write(fmt.format(k + 1, *x).encode())
    fh.write(f"$End{tag}\n".encode())
src/meshio/gmsh/main.py 0000664 0000000 0000000 00000006557 14562440725 0015413 0 ustar 00root root 0000000 0000000 import pathlib
import struct
from .._exceptions import ReadError, WriteError
from .._helpers import register_format
from . import _gmsh22, _gmsh40, _gmsh41
from .common import _fast_forward_to_end_block
# Version-string -> submodule dispatch tables for reading and writing.
# Some mesh files out there have the version specified as version "2" when it really is
# "2.2". Same with "4" vs "4.1".
_readers = {"2": _gmsh22, "2.2": _gmsh22, "4.0": _gmsh40, "4": _gmsh41, "4.1": _gmsh41}
_writers = {"2.2": _gmsh22, "4.0": _gmsh40, "4.1": _gmsh41}
def read(filename):
    """Reads a Gmsh msh file."""
    path = pathlib.Path(filename)
    with open(path.as_posix(), "rb") as f:
        return read_buffer(f)
def read_buffer(f):
    """Read a Gmsh mesh from binary stream *f*, dispatching on the format version."""
    # The various versions of the format are specified at
    # .
    line = f.readline().decode().strip()
    # skip any $Comments/$EndComments sections
    while line == "$Comments":
        _fast_forward_to_end_block(f, "Comments")
        line = f.readline().decode().strip()
    if line != "$MeshFormat":
        raise ReadError()
    fmt_version, data_size, is_ascii = _read_header(f)
    # Try the exact version first, then fall back to the major version
    # (e.g. "4.2" -> "4").
    reader = _readers.get(fmt_version)
    if reader is None:
        reader = _readers.get(fmt_version.split(".")[0])
    if reader is None:
        raise ValueError(
            "Need mesh format in {} (got {})".format(
                sorted(_readers.keys()), fmt_version
            )
        )
    return reader.read_buffer(f, is_ascii, data_size)
def _read_header(f):
    """Read the mesh format block
    specified as
    version(ASCII double; currently 4.1)
    file-type(ASCII int; 0 for ASCII mode, 1 for binary mode)
    data-size(ASCII int; sizeof(size_t))
    < int with value one; only in binary mode, to detect endianness >
    though here the version is left as str
    """
    # http://gmsh.info/doc/texinfo/gmsh.html#MSH-file-format
    # Split e.g. "4.1 0 8" into its three components.
    tokens = f.readline().decode().split()
    fmt_version = tokens[0]
    if tokens[1] not in ("0", "1"):
        raise ReadError()
    is_ascii = tokens[1] == "0"
    data_size = int(tokens[2])
    if not is_ascii:
        # The next line is the integer 1 in bytes. Useful for checking endianness.
        # Just assert that we get 1 here.
        one = f.read(struct.calcsize("i"))
        if struct.unpack("i", one)[0] != 1:
            raise ReadError()
    _fast_forward_to_end_block(f, "MeshFormat")
    return fmt_version, data_size, is_ascii
# Gmsh ASCII output uses `%.16g` for floating point values,
# meshio uses same precision but exponential notation `%.16e`.
def write(filename, mesh, fmt_version="4.1", binary=True, float_fmt=".16e"):
    """Writes a Gmsh msh file.

    Raises WriteError if *fmt_version* is not one of the supported writer
    versions. Note that unlike the reader, the writer does NOT accept
    major-version aliases such as "4" for "4.1".
    """
    try:
        writer = _writers[fmt_version]
    except KeyError:
        # A previous revision retried the exact same dict lookup here, which
        # could never succeed; collapsed to a single attempt.
        raise WriteError(
            "Need mesh format in {} (got {})".format(
                sorted(_writers.keys()), fmt_version
            )
        )
    writer.write(filename, mesh, binary=binary, float_fmt=float_fmt)
# Register the Gmsh reader and the per-version writers with meshio's central
# format registry: "gmsh22" forces the legacy 2.2 format, "gmsh" writes 4.1.
register_format(
    "gmsh",
    [".msh"],
    read,
    {
        "gmsh22": lambda f, m, **kwargs: write(f, m, "2.2", **kwargs),
        "gmsh": lambda f, m, **kwargs: write(f, m, "4.1", **kwargs),
    },
)
src/meshio/h5m/ 0000775 0000000 0000000 00000000000 14562440725 0013633 5 ustar 00root root 0000000 0000000 src/meshio/h5m/__init__.py 0000664 0000000 0000000 00000000073 14562440725 0015744 0 ustar 00root root 0000000 0000000 from ._h5m import read, write
__all__ = ["read", "write"]
src/meshio/h5m/_h5m.py 0000664 0000000 0000000 00000021367 14562440725 0015046 0 ustar 00root root 0000000 0000000 """
I/O for h5m, cf.
.
"""
from datetime import datetime
import numpy as np
from .. import __about__
from .._common import warn
from .._helpers import register_format
from .._mesh import CellBlock, Mesh
# def _int_to_bool_list(num):
# # From .
# bin_string = format(num, '04b')
# return [x == '1' for x in bin_string[::-1]]
def read(filename):
    """Read an h5m (MOAB) file into a meshio Mesh.

    :param filename: path of the .h5m file
    :return: Mesh with points, cells and point data (node tags); cell data
        and field data are currently left empty (see TODOs)
    :raises KeyError: if the file contains an element type not covered by
        the h5m -> meshio mapping below
    """
    import h5py

    # Use a context manager so the file handle is always released; the
    # original implementation left the h5py.File open.
    with h5py.File(filename, "r") as f:
        dset = f["tstt"]

        points = dset["nodes"]["coordinates"][()]

        # read point data (node tags)
        point_data = {}
        if "tags" in dset["nodes"]:
            for name, dataset in dset["nodes"]["tags"].items():
                point_data[name] = dataset[()]

        # h5m element name -> meshio cell type
        h5m_to_meshio_type = {
            "Edge2": "line",
            "Hex8": "hexahedron",
            "Prism6": "wedge",
            "Pyramid5": "pyramid",
            "Quad4": "quad",
            "Tri3": "triangle",
            "Tet4": "tetra",
        }
        cells = []
        cell_data = {}
        for h5m_type, data in dset["elements"].items():
            meshio_type = h5m_to_meshio_type[h5m_type]
            conn = data["connectivity"]
            # Note that the indices are off by 1 in h5m.
            cells.append(CellBlock(meshio_type, conn[()] - 1))
            # TODO bring cell data back

        # The `sets` in H5M are special in that they represent a segregation
        # of data in the current file, particularly by a load balancer
        # (Metis, Zoltan, etc.).  This segregation has no equivalent in other
        # data types, but is certainly worthwhile visualizing; a future
        # version could translate the sets into cell data with a "set::"
        # prefix.
        field_data = {}
        # TODO deal with sets

    return Mesh(
        points, cells, point_data=point_data, cell_data=cell_data, field_data=field_data
    )
def write(filename, mesh, add_global_ids=True, compression="gzip", compression_opts=4):
    """Write a meshio Mesh to an h5m (MOAB) file.

    :param filename: path of the output file
    :param mesh: meshio Mesh to write
    :param add_global_ids: add a 1-based GLOBAL_ID point tag if none exists
    :param compression: HDF5 compression filter applied to all datasets
    :param compression_opts: options for the filter (gzip level)
    """
    import h5py

    # NOTE(review): the file handle is never explicitly closed; it is only
    # released when the h5py.File object is garbage-collected.
    f = h5py.File(filename, "w")

    tstt = f.create_group("tstt")

    # The base index for h5m is 1.
    global_id = 1

    # add nodes
    nodes = tstt.create_group("nodes")
    coords = nodes.create_dataset(
        "coordinates",
        data=mesh.points,
        compression=compression,
        compression_opts=compression_opts,
    )
    coords.attrs.create("start_id", global_id)
    global_id += len(mesh.points)

    # Global tags
    tstt_tags = tstt.create_group("tags")

    # The GLOBAL_ID associated with a point is used to identify points if
    # distributed across several processes. mbpart automatically adds them,
    # too.
    # Copy to pd to avoid changing point_data. The items are not deep-copied.
    pd = mesh.point_data.copy()
    if "GLOBAL_ID" not in pd and add_global_ids:
        pd["GLOBAL_ID"] = np.arange(1, len(mesh.points) + 1)

    # add point data
    if pd:
        tags = nodes.create_group("tags")
        for key, data in pd.items():
            if len(data.shape) == 1:
                dtype = data.dtype
                tags.create_dataset(
                    key,
                    data=data,
                    compression=compression,
                    compression_opts=compression_opts,
                )
            else:
                # H5M doesn't accept n-x-k arrays as data; it wants an n-x-1
                # array with k-tuples as entries.
                n, k = data.shape
                dtype = np.dtype((data.dtype, (k,)))
                dset = tags.create_dataset(
                    key,
                    (n,),
                    dtype=dtype,
                    compression=compression,
                    compression_opts=compression_opts,
                )
                dset[:] = data

            # Create entry in global tags
            g = tstt_tags.create_group(key)
            g["type"] = dtype
            # Add a class tag:
            # From the MOAB mhdf headers (the URL was stripped from this copy):
            # ```
            # /* Was dense tag data in mesh database */
            # define mhdf_DENSE_TYPE 2
            # /** \brief Was sparse tag data in mesh database */
            # #define mhdf_SPARSE_TYPE 1
            # /** \brief Was bit-field tag data in mesh database */
            # #define mhdf_BIT_TYPE 0
            # /** \brief Unused */
            # #define mhdf_MESH_TYPE 3
            #
            g.attrs["class"] = 2

    # add elements
    elements = tstt.create_group("elements")

    # Enum dtype mapping MOAB element family names to their integer codes.
    elem_dt = h5py.special_dtype(
        enum=(
            "i",
            {
                "Edge": 1,
                "Tri": 2,
                "Quad": 3,
                "Polygon": 4,
                "Tet": 5,
                "Pyramid": 6,
                "Prism": 7,
                "Knife": 8,
                "Hex": 9,
                "Polyhedron": 10,
            },
        )
    )
    tstt["elemtypes"] = elem_dt

    # Provenance record: writer name, version, timestamp.
    tstt.create_dataset(
        "history",
        data=[
            __name__.encode(),
            __about__.__version__.encode(),
            str(datetime.now()).encode(),
        ],
        compression=compression,
        compression_opts=compression_opts,
    )

    # number of nodes to h5m name, element type
    meshio_to_h5m_type = {
        "line": {"name": "Edge2", "type": 1},
        "triangle": {"name": "Tri3", "type": 2},
        "tetra": {"name": "Tet4", "type": 5},
    }
    for cell_block in mesh.cells:
        key = cell_block.type
        data = cell_block.data
        if key not in meshio_to_h5m_type:
            # NOTE(review): warn() is given printf-style arguments here;
            # confirm the project's warn helper actually interpolates them.
            warn("Unsupported H5M element type '%s'. Skipping.", key)
            continue
        this_type = meshio_to_h5m_type[key]
        elem_group = elements.create_group(this_type["name"])
        elem_group.attrs.create("element_type", this_type["type"], dtype=elem_dt)

        # h5m node indices are 1-based
        conn = elem_group.create_dataset(
            "connectivity",
            data=(data + 1),
            compression=compression,
            compression_opts=compression_opts,
        )
        conn.attrs.create("start_id", global_id)
        global_id += len(data)

        # add cell data
        # NOTE(review): `cd` is a value of mesh.cell_data; if cell-data values
        # are lists of per-block arrays (as elsewhere in this code base), they
        # have no .items() and this loop would fail when cell data is present.
        # Also "tags" would be re-created per non-empty entry — verify.
        for cell_type, cd in mesh.cell_data.items():
            if cd:
                tags = elem_group.create_group("tags")
                for key, value in cd.items():
                    tags.create_dataset(
                        key,
                        data=value,
                        compression=compression,
                        compression_opts=compression_opts,
                    )

    # add empty set -- MOAB wants this
    sets = tstt.create_group("sets")
    sets.create_group("tags")

    # set max_id
    tstt.attrs.create("max_id", global_id, dtype="u8")
register_format("h5m", [".h5m"], read, {"h5m": write})
src/meshio/hmf/ 0000775 0000000 0000000 00000000000 14562440725 0013714 5 ustar 00root root 0000000 0000000 src/meshio/hmf/__init__.py 0000664 0000000 0000000 00000000073 14562440725 0016025 0 ustar 00root root 0000000 0000000 from ._hmf import read, write
__all__ = ["read", "write"]
src/meshio/hmf/_hmf.py 0000664 0000000 0000000 00000010466 14562440725 0015206 0 ustar 00root root 0000000 0000000 import meshio
from .._common import cell_data_from_raw, raw_from_cell_data, warn
from .._helpers import register_format
from ..xdmf.common import meshio_to_xdmf_type, xdmf_to_meshio_type
def read(filename):
    """Read an hmf file and return a meshio Mesh."""
    import h5py

    with h5py.File(filename, "r") as f:
        assert f.attrs["type"] == "hmf"
        assert f.attrs["version"] == "0.1-alpha"

        assert len(f) == 1, "only one domain supported for now"
        domain = f["domain"]

        assert len(domain) == 1, "only one grid supported for now"
        grid = domain["grid"]

        points = None
        cells = {}
        point_data = {}
        cell_data_raw = {}

        for key, group in grid.items():
            if key.startswith("Topology"):
                # One dataset per cell block, tagged with its XDMF type.
                xdmf_type = group.attrs["TopologyType"]
                cells[xdmf_to_meshio_type[xdmf_type]] = group[()]
            elif key == "Geometry":
                # TODO is GeometryType really needed?
                assert group.attrs["GeometryType"] in ["X", "XY", "XYZ"]
                points = group[()]
            elif key == "CellAttributes":
                for name, dataset in group.items():
                    cell_data_raw[name] = dataset[()]
            else:
                assert key == "NodeAttributes"
                for name, dataset in group.items():
                    point_data[name] = dataset[()]

    # Split the concatenated cell data back into per-cell-type chunks.
    cell_data = cell_data_from_raw(cells, cell_data_raw)

    return meshio.Mesh(
        points,
        cells,
        point_data=point_data,
        cell_data=cell_data,
    )
def write_points_cells(filename, points, cells, **kwargs):
    """Convenience wrapper: build a Mesh from points/cells and write it."""
    mesh = meshio.Mesh(points, cells)
    write(filename, mesh, **kwargs)
def write(filename, mesh, compression="gzip", compression_opts=4):
    """Write *mesh* to an hmf file (experimental HDF5-based format)."""
    import h5py

    warn("Experimental file format. Format can change at any time.")

    with h5py.File(filename, "w") as out:
        out.attrs["type"] = "hmf"
        out.attrs["version"] = "0.1-alpha"

        # Single domain containing a single grid, mirroring the reader.
        grid = out.create_group("domain").create_group("grid")

        _write_points(grid, mesh.points, compression, compression_opts)
        _write_cells(mesh.cells, grid, compression, compression_opts)
        _write_point_data(mesh.point_data, grid, compression, compression_opts)
        _write_cell_data(mesh.cell_data, grid, compression, compression_opts)
def _write_points(grid, points, compression, compression_opts):
geo = grid.create_dataset(
"Geometry",
data=points,
compression=compression,
compression_opts=compression_opts,
)
geo.attrs["GeometryType"] = "XYZ"[: points.shape[1]]
def _write_cells(cell_blocks, grid, compression, compression_opts):
    """Store each cell block as a "Topology{k}" dataset with its XDMF type."""
    for index, block in enumerate(cell_blocks):
        # Look up the XDMF name first so an unknown type fails before any
        # dataset is created.
        xdmf_name = meshio_to_xdmf_type[block.type][0]
        dataset = grid.create_dataset(
            f"Topology{index}",
            data=block.data,
            compression=compression,
            compression_opts=compression_opts,
        )
        dataset.attrs["TopologyType"] = xdmf_name
# In XDMF, the point/cell data are stored as
#
#     <Attribute Name="name" Center="Node">
#       <DataItem DataType="Float" Format="HDF" Precision="8" Dimensions="...">
#         out.h5:/data2
#       </DataItem>
#     </Attribute>
#
#
# We cannot register multiple entries with the same name in HDF, so instead of
# "Attribute", use
# ```
# NodeAttributes
# -> name0 + data0
# -> name1 + data0
# -> ...
# CellAttributes
# -> ...
# ```
# Alternative:
# ```
# NodeAttribute0
# -> name
# -> data
# NodeAttribute1
# -> name
# -> data
# ...
# ```
# It's done similarly for Topologies (cells).
#
def _write_point_data(point_data, grid, compression, compression_opts):
na = grid.create_group("NodeAttributes")
for name, data in point_data.items():
na.create_dataset(
name,
data=data,
compression=compression,
compression_opts=compression_opts,
)
def _write_cell_data(cell_data, grid, compression, compression_opts):
    """Concatenate per-block cell data and store it under "CellAttributes"."""
    concatenated = raw_from_cell_data(cell_data)
    group = grid.create_group("CellAttributes")
    for name, values in concatenated.items():
        group.create_dataset(
            name,
            data=values,
            compression=compression,
            compression_opts=compression_opts,
        )
# Register the experimental hmf reader/writer with meshio's format registry.
register_format(
    "hmf",
    [".hmf"],
    read,
    {"hmf": write},
)
src/meshio/mdpa/ 0000775 0000000 0000000 00000000000 14562440725 0014063 5 ustar 00root root 0000000 0000000 src/meshio/mdpa/__init__.py 0000664 0000000 0000000 00000000074 14562440725 0016175 0 ustar 00root root 0000000 0000000 from ._mdpa import read, write
__all__ = ["read", "write"]
src/meshio/mdpa/_mdpa.py 0000664 0000000 0000000 00000040163 14562440725 0015521 0 ustar 00root root 0000000 0000000 """
I/O for KratosMultiphysics's mdpa format, cf.
.
The MDPA format is unsuitable for fast consumption, this is why:
.
"""
import numpy as np
from .._common import num_nodes_per_cell, raw_from_cell_data, warn
from .._exceptions import ReadError, WriteError
from .._files import open_file
from .._helpers import register_format
from .._mesh import Mesh
## We check if we can read/write the mesh natively from Kratos
# TODO: Implement native reading
# Translate meshio types to KratosMultiphysics codes
# Kratos uses the same node numbering of GiD pre and post processor
# http://www-opale.inrialpes.fr/Aerochina/info/en/html-version/gid_11.html
# https://github.com/KratosMultiphysics/Kratos/wiki/Mesh-node-ordering
# MDPA entity name -> meshio cell type.
_mdpa_to_meshio_type = {
    "Line2D2": "line",
    "Line3D2": "line",
    "Triangle2D3": "triangle",
    "Triangle3D3": "triangle",
    "Quadrilateral2D4": "quad",
    "Quadrilateral3D4": "quad",
    "Tetrahedra3D4": "tetra",
    "Hexahedra3D8": "hexahedron",
    "Prism3D6": "wedge",
    "Line2D3": "line3",
    "Triangle2D6": "triangle6",
    "Triangle3D6": "triangle6",
    "Quadrilateral2D9": "quad9",
    "Quadrilateral3D9": "quad9",
    "Tetrahedra3D10": "tetra10",
    "Hexahedra3D27": "hexahedron27",
    "Point2D": "vertex",
    "Point3D": "vertex",
    "Quadrilateral2D8": "quad8",
    "Quadrilateral3D8": "quad8",
    "Hexahedra3D20": "hexahedron20",
}
# meshio cell type -> canonical MDPA name (2D variant); the writer swaps the
# "2D"/"3D" substring to match the mesh dimension.
_meshio_to_mdpa_type = {
    "line": "Line2D2",
    "triangle": "Triangle2D3",
    "quad": "Quadrilateral2D4",
    "tetra": "Tetrahedra3D4",
    "hexahedron": "Hexahedra3D8",
    "wedge": "Prism3D6",
    "line3": "Line2D3",
    "triangle6": "Triangle2D6",
    "quad9": "Quadrilateral2D9",
    "tetra10": "Tetrahedra3D10",
    "hexahedron27": "Hexahedra3D27",
    "vertex": "Point2D",
    "quad8": "Quadrilateral2D8",
    "hexahedron20": "Hexahedra3D20",
}
# node count -> meshio type; used to guess the cell type when the entity name
# contains no known MDPA type name.
# NOTE(review): num_nodes_per_cell is not necessarily injective (several cell
# types can share a node count), so this inversion keeps whichever entry
# iterates last — confirm that fallback is acceptable.
inverse_num_nodes_per_cell = {v: k for k, v in num_nodes_per_cell.items()}
# MDPA name -> topological (local) dimension of the element.
local_dimension_types = {
    "Line2D2": 1,
    "Line3D2": 1,
    "Triangle2D3": 2,
    "Triangle3D3": 2,
    "Quadrilateral2D4": 2,
    "Quadrilateral3D4": 2,
    "Tetrahedra3D4": 3,
    "Hexahedra3D8": 3,
    "Prism3D6": 3,
    "Line2D3": 1,
    "Triangle2D6": 2,
    "Triangle3D6": 2,
    "Quadrilateral2D9": 2,
    "Quadrilateral3D9": 2,
    "Tetrahedra3D10": 3,
    "Hexahedra3D27": 3,
    "Point2D": 0,
    "Point3D": 0,
    "Quadrilateral2D8": 2,
    "Quadrilateral3D8": 2,
    "Hexahedra3D20": 3,
}
def read(filename):
    """Reads a KratosMultiphysics mdpa file."""
    # TODO: read natively through the Kratos API once implemented.
    with open_file(filename, "rb") as f:
        return read_buffer(f)
def _read_nodes(f, is_ascii, data_size):
    # Read the "Begin Nodes" block and return an (n, 3) coordinate array.
    # `is_ascii` and `data_size` are currently unused but kept for interface
    # symmetry with the other readers.
    #
    # Count the number of nodes. This is _extremely_ ugly; we first read the _entire_
    # file until "End Nodes". The crazy thing is that first counting the lines, then
    # skipping back to pos, and using fromfile there is _faster_ than accumulating the
    # points into a list and converting them to a numpy array afterwards. A point count
    # would be _really_ helpful here, but yeah, that's a fallacy of the format.
    #
    pos = f.tell()
    num_nodes = 0
    while True:
        line = f.readline().decode()
        if "End Nodes" in line:
            break
        num_nodes += 1
    f.seek(pos)

    # Each node line is "<id> <x> <y> <z>", i.e. 4 whitespace-separated values.
    points = np.fromfile(f, count=num_nodes * 4, sep=" ").reshape((num_nodes, 4))
    # The first number is the index
    points = points[:, 1:]

    # Consume the "End Nodes" terminator line and sanity-check it.
    line = f.readline().decode()
    if line.strip() != "End Nodes":
        raise ReadError()
    return points
def _read_cells(f, cells, is_ascii, cell_tags, environ=None):
    # Parse one "Begin Elements"/"Begin Conditions" block, appending to
    # `cells` (a list of (meshio_type, [connectivity rows]) tuples) and to
    # `cell_tags` (meshio_type -> list of per-entity property-id lists).
    if not is_ascii:
        raise ReadError("Can only read ASCII cells")

    # First we try to identify the entity from the block header, e.g.
    # "Begin Elements Triangle2D3" -> "triangle".
    t = None
    if environ is not None:
        if environ.startswith("Begin Elements "):
            entity_name = environ[15:]
            for key in _mdpa_to_meshio_type:
                if key in entity_name:
                    t = _mdpa_to_meshio_type[key]
                    break
        elif environ.startswith("Begin Conditions "):
            entity_name = environ[17:]
            for key in _mdpa_to_meshio_type:
                if key in entity_name:
                    t = _mdpa_to_meshio_type[key]
                    break

    while True:
        line = f.readline().decode()
        if line.startswith("End Elements") or line.startswith("End Conditions"):
            break
        # data[0] gives the entity id
        # data[1] gives the property id
        # The rest are the ids of the nodes
        data = [int(k) for k in filter(None, line.split())]
        num_nodes_per_elem = len(data) - 2

        # We use this in case not alternative: fall back to guessing the cell
        # type from the node count when the header gave no match.
        if t is None:
            t = inverse_num_nodes_per_cell[num_nodes_per_elem]

        # Start a new block whenever the cell type changes.
        if len(cells) == 0 or t != cells[-1][0]:
            cells.append((t, []))
        # Subtract one to account for the fact that python indices are 0-based.
        cells[-1][1].append(np.array(data[-num_nodes_per_elem:]) - 1)

        # Using the property id as tag
        if t not in cell_tags:
            cell_tags[t] = []
        cell_tags[t].append([data[1]])

    # Cannot convert cell_tags[key] to numpy array: There may be a
    # different number of tags for each cell.

    if line.strip() not in ["End Elements", "End Conditions"]:
        raise ReadError()
def _prepare_cells(cells, cell_tags):
    # Post-process cells/tags after reading: split the raw property-id tags
    # into gmsh:physical/gmsh:geometrical arrays and (nominally) reorder a
    # few cell types from Kratos to VTK node ordering.  Returns True if tag
    # entries beyond the standard two were present.
    # Declaring has additional data tag
    has_additional_tag_data = False
    # restrict to the standard two data items (physical, geometrical)
    # NOTE(review): output_cell_tags is built here but never returned or
    # stored anywhere — presumably it was meant to feed cell_data; verify.
    output_cell_tags = {}
    for key in cell_tags:
        output_cell_tags[key] = {"gmsh:physical": [], "gmsh:geometrical": []}
        for item in cell_tags[key]:
            if len(item) > 0:
                output_cell_tags[key]["gmsh:physical"].append(item[0])
            if len(item) > 1:
                output_cell_tags[key]["gmsh:geometrical"].append(item[1])
            if len(item) > 2:
                has_additional_tag_data = True
        output_cell_tags[key]["gmsh:physical"] = np.array(
            output_cell_tags[key]["gmsh:physical"], dtype=int
        )
        output_cell_tags[key]["gmsh:geometrical"] = np.array(
            output_cell_tags[key]["gmsh:geometrical"], dtype=int
        )

    # Kratos cells are mostly ordered like VTK, with a few exceptions:
    # NOTE(review): `cells` is a list of (type, connectivity) tuples here (see
    # read_buffer/_read_cells), so these string membership tests never appear
    # to match and the two reordering branches look unreachable — verify.
    if "hexahedron20" in cells:
        cells["hexahedron20"] = cells["hexahedron20"][
            :, [0, 1, 2, 3, 4, 5, 6, 7, 8, 11, 10, 9, 16, 19, 18, 17, 12, 13, 14, 15]
        ]
    if "hexahedron27" in cells:
        cells["hexahedron27"] = cells["hexahedron27"][
            :,
            [
                0,
                1,
                2,
                3,
                4,
                5,
                6,
                7,
                8,
                11,
                10,
                9,
                16,
                19,
                18,
                17,
                12,
                13,
                14,
                15,
                22,
                24,
                21,
                23,
                20,
                25,
                26,
            ],
        ]

    return has_additional_tag_data
# def _read_data(f, tag, data_dict, is_ascii):
# if not is_ascii:
# raise ReadError()
# # Read string tags
# num_string_tags = int(f.readline().decode())
# string_tags = [
# f.readline().decode().strip().replace('"', "")
# for _ in range(num_string_tags)
# ]
# # The real tags typically only contain one value, the time.
# # Discard it.
# num_real_tags = int(f.readline().decode())
# for _ in range(num_real_tags):
# f.readline()
# num_integer_tags = int(f.readline().decode())
# integer_tags = [int(f.readline().decode()) for _ in range(num_integer_tags)]
# num_components = integer_tags[1]
# num_items = integer_tags[2]
#
# # Creating data
# data = np.fromfile(f, count=num_items * (1 + num_components), sep=" ").reshape(
# (num_items, 1 + num_components)
# )
# # The first number is the index
# data = data[:, 1:]
#
# line = f.readline().decode()
# if line.strip() != f"End {tag}":
# raise ReadError()
#
# # The gmsh format cannot distinguish between data of shape (n,) and (n, 1).
# # If shape[1] == 1, cut it off.
# if data.shape[1] == 1:
# data = data[:, 0]
#
# data_dict[string_tags[0]] = data
def read_buffer(f):
    """Read an mdpa mesh from an open binary file-like object.

    Only the Nodes and Elements/Conditions blocks are processed; the
    NodalData/ElementalData/ConditionalData blocks are a TODO.

    :param f: binary file-like object positioned at the start of the data
    :return: meshio Mesh
    """
    # Initialize the optional data fields
    points = []
    cells = []
    field_data = {}
    cell_data = {}
    point_data = {}
    # Property ids collected per cell type, later split into tags.
    # (The original initialized this dict twice; the duplicate was removed.)
    cell_tags = {}

    is_ascii = True
    data_size = None

    # Read the mesh blocks; anything unrecognized is skipped.
    while True:
        line = f.readline().decode()
        if not line:
            # EOF
            break
        environ = line.strip()

        if environ.startswith("Begin Nodes"):
            points = _read_nodes(f, is_ascii, data_size)
        elif environ.startswith("Begin Elements") or environ.startswith(
            "Begin Conditions"
        ):
            _read_cells(f, cells, is_ascii, cell_tags, environ)

    # We finally prepare the cells
    has_additional_tag_data = _prepare_cells(cells, cell_tags)

    # TODO: read NodalData/ElementalData/ConditionalData blocks and merge
    # the collected cell tags into cell_data.
    if has_additional_tag_data:
        warn("The file contains tag data that couldn't be processed.")

    return Mesh(
        points, cells, point_data=point_data, cell_data=cell_data, field_data=field_data
    )
def cell_data_from_raw(cells, cell_data_raw):
    """Split concatenated per-cell arrays back into per-cell-type chunks.

    ``cell_data_raw`` maps a data name to one array covering all cells; the
    result maps each cell type to {name: slice of that array}, in the order
    the cell types appear in ``cells``.
    """
    cell_data = {cell_type: {} for cell_type in cells}
    for name, concatenated in cell_data_raw.items():
        offset = 0
        for cell_type in cells:
            block_len = len(cells[cell_type])
            cell_data[cell_type][name] = concatenated[offset : offset + block_len]
            offset += block_len
    return cell_data
def _write_nodes(fh, points, float_fmt, binary=False):
fh.write(b"Begin Nodes\n")
if binary:
raise WriteError()
for k, x in enumerate(points):
fmt = " {} " + " ".join(3 * ["{:" + float_fmt + "}"]) + "\n"
fh.write(fmt.format(k + 1, x[0], x[1], x[2]).encode())
fh.write(b"End Nodes\n\n")
def _write_elements_and_conditions(fh, cells, tag_data, binary=False, dimension=2):
    """Write one "Begin Elements" block per cell block (ASCII only).

    ``tag_data`` is currently unused; proper tag recognition is a TODO.
    Entity ids are numbered consecutively across all blocks.
    """
    if binary:
        raise WriteError("Can only write ASCII")

    # NOTE: the dummy Conditions names are irregular, so everything is
    # written as Elements for now.
    entity = "Elements"
    dim_name = f"{dimension}D"
    other_dim_name = "3D" if dimension == 2 else "2D"

    entity_id = 0
    for block in cells:
        # Rename e.g. "Triangle3D3" -> "Triangle2D3" for a 2D mesh.
        mdpa_cell_type = _meshio_to_mdpa_type[block.type].replace(
            other_dim_name, dim_name
        )
        fh.write(f"Begin {entity} {mdpa_cell_type}\n".encode())

        connectivity = block.data
        # TODO: Add proper tag recognition in the future
        # Zero tag columns for now.
        fcd = np.empty((len(connectivity), 0), dtype=np.int32)

        for k, node_ids in enumerate(connectivity):
            a1 = " ".join(str(val) for val in fcd[k])
            a2 = " ".join(str(n) for n in node_ids + 1)
            fh.write(
                f" {entity_id + k + 1} {fcd.shape[1]} {a1} {a2}\n".encode()
            )
        entity_id += len(connectivity)
        fh.write(f"End {entity}\n\n".encode())
def _write_data(fh, tag, name, data, binary):
if binary:
raise WriteError()
fh.write(f"Begin {tag} {name}\n\n".encode())
# number of components
num_components = data.shape[1] if len(data.shape) > 1 else 1
# Cut off the last dimension in case it's 1. This avoids problems with
# writing the data.
if len(data.shape) > 1 and data.shape[1] == 1:
data = data[:, 0]
# Actually write the data
fmt = " ".join(["{}"] + ["{!r}"] * num_components) + "\n"
# TODO unify
if num_components == 1:
for k, x in enumerate(data):
fh.write(fmt.format(k + 1, x).encode())
else:
for k, x in enumerate(data):
fh.write(fmt.format(k + 1, *x).encode())
fh.write(f"End {tag} {name}\n\n".encode())
def write(filename, mesh, float_fmt=".16e", binary=False):
    """Write a KratosMultiphysics mdpa file (ASCII only).

    :param filename: path of the output file
    :param mesh: meshio Mesh to write
    :param float_fmt: format spec for node coordinates
    :param binary: must be False; binary mdpa output is not supported
    :raises WriteError: if binary output is requested
    """
    if binary:
        raise WriteError()
    # mdpa stores 3D coordinates; pad 2D points with a zero z component.
    if mesh.points.shape[1] == 2:
        warn(
            "mdpa requires 3D points, but 2D points given. "
            "Appending 0 third component."
        )
        points = np.column_stack([mesh.points, np.zeros_like(mesh.points[:, 0])])
    else:
        points = mesh.points

    # Kratos cells are mostly ordered like VTK, with a few exceptions:
    # NOTE(review): `cells` is a (shallow) copy of the mesh.cells list of cell
    # blocks, so these string membership tests never appear to match and the
    # two reordering branches below look unreachable — verify.
    cells = mesh.cells.copy()
    if "hexahedron20" in cells:
        cells["hexahedron20"] = cells["hexahedron20"][
            :, [0, 1, 2, 3, 4, 5, 6, 7, 8, 11, 10, 9, 16, 17, 18, 19, 12, 15, 14, 13]
        ]
    if "hexahedron27" in cells:
        cells["hexahedron27"] = cells["hexahedron27"][
            :,
            [
                0,
                1,
                2,
                3,
                4,
                5,
                6,
                7,
                8,
                11,
                10,
                9,
                16,
                17,
                18,
                19,
                12,
                15,
                14,
                13,
                22,
                24,
                21,
                23,
                20,
                25,
                26,
            ],
        ]

    with open_file(filename, "wb") as fh:
        # Write some additional info
        fh.write(b"Begin ModelPartData\n")
        fh.write(b"// VARIABLE_NAME value\n")
        fh.write(b"End ModelPartData\n\n")
        fh.write(b"Begin Properties 0\n")
        fh.write(b"End Properties\n\n")

        # Split the cell data: gmsh:physical and gmsh:geometrical are tags, the
        # rest is actual cell data.
        tag_data = {}
        other_data = {}
        for key, data in mesh.cell_data.items():
            if key in ["gmsh:physical", "gmsh:geometrical"]:
                tag_data[key] = [entry.astype(np.int32) for entry in data]
            else:
                other_data[key] = data

        # identity dimension: 3 as soon as any cell type is volumetric.
        dimension = 2
        for c in cells:
            name_elem = _meshio_to_mdpa_type[c.type]
            if local_dimension_types[name_elem] == 3:
                dimension = 3
                break

        # identify entities
        _write_nodes(fh, points, float_fmt, binary)
        _write_elements_and_conditions(fh, cells, tag_data, binary, dimension)
        for name, dat in mesh.point_data.items():
            _write_data(fh, "NodalData", name, dat, binary)
        cell_data_raw = raw_from_cell_data(other_data)
        for name, dat in cell_data_raw.items():
            # assume always that the components are elements (for now)
            _write_data(fh, "ElementalData", name, dat, binary)
# Register the mdpa reader/writer with meshio's format registry.
register_format("mdpa", [".mdpa"], read, {"mdpa": write})
src/meshio/med/ 0000775 0000000 0000000 00000000000 14562440725 0013707 5 ustar 00root root 0000000 0000000 src/meshio/med/__init__.py 0000664 0000000 0000000 00000000073 14562440725 0016020 0 ustar 00root root 0000000 0000000 from ._med import read, write
__all__ = ["read", "write"]
src/meshio/med/_med.py 0000664 0000000 0000000 00000040707 14562440725 0015175 0 ustar 00root root 0000000 0000000 """
I/O for MED/Salome, cf.
.
"""
import numpy as np
from .._common import num_nodes_per_cell
from .._exceptions import ReadError, WriteError
from .._helpers import register_format
from .._mesh import Mesh
# https://docs.salome-platform.org/5/med/dev/med__outils_8hxx.html
# meshio cell type -> 3-character MED geometry code.
meshio_to_med_type = {
    "vertex": "PO1",
    "line": "SE2",
    "line3": "SE3",
    "triangle": "TR3",
    "triangle6": "TR6",
    "quad": "QU4",
    "quad8": "QU8",
    "tetra": "TE4",
    "tetra10": "T10",
    "hexahedron": "HE8",
    "hexahedron20": "H20",
    "pyramid": "PY5",
    "pyramid13": "P13",
    "wedge": "PE6",
    "wedge15": "P15",
}
# Inverse mapping; meshio_to_med_type is one-to-one, so no entries collide.
med_to_meshio_type = {v: k for k, v in meshio_to_med_type.items()}
# Empty byte string used for unset MED string attributes (e.g. units).
numpy_void_str = np.bytes_("")
def read(filename):
    """Read a MED/Salome file into a meshio Mesh.

    Exactly one mesh (and, if time-stepped, exactly one time step) must be
    present.  Family (tag) information is attached to the returned Mesh as
    ``point_tags``/``cell_tags`` attributes.

    :param filename: path of the .med file
    :return: meshio Mesh
    :raises ReadError: if the file contains more than one mesh or time step
    """
    import h5py

    # NOTE(review): the h5py.File is never explicitly closed; it stays open
    # until garbage collection.
    f = h5py.File(filename, "r")

    # Mesh ensemble
    mesh_ensemble = f["ENS_MAA"]
    meshes = mesh_ensemble.keys()
    if len(meshes) != 1:
        raise ReadError(f"Must only contain exactly 1 mesh, found {len(meshes)}.")
    mesh_name = list(meshes)[0]
    mesh = mesh_ensemble[mesh_name]

    # Spatial dimension (ESP attribute).
    dim = mesh.attrs["ESP"]

    # Possible time-stepping
    if "NOE" not in mesh:
        # One needs NOE (node) and MAI (French maillage, meshing) data. If they
        # are not available in the mesh, check for time-steppings.
        time_step = mesh.keys()
        if len(time_step) != 1:
            raise ReadError(
                f"Must only contain exactly 1 time-step, found {len(time_step)}."
            )
        mesh = mesh[list(time_step)[0]]

    # Initialize data
    point_data = {}
    cell_data = {}
    field_data = {}

    # Points: coordinates are stored flat in Fortran (column-major) order.
    pts_dataset = mesh["NOE"]["COO"]
    n_points = pts_dataset.attrs["NBR"]
    points = pts_dataset[()].reshape((n_points, dim), order="F")

    # Point tags (family ids per point)
    if "FAM" in mesh["NOE"]:
        tags = mesh["NOE"]["FAM"][()]
        point_data["point_tags"] = tags  # replacing previous "point_tags"

    # Information for point tags
    point_tags = {}
    fas = mesh["FAS"] if "FAS" in mesh else f["FAS"][mesh_name]
    if "NOEUD" in fas:
        point_tags = _read_families(fas["NOEUD"])

    # CellBlock
    cells = []
    cell_types = []
    med_cells = mesh["MAI"]
    for med_cell_type, med_cell_type_group in med_cells.items():
        cell_type = med_to_meshio_type[med_cell_type]
        cell_types.append(cell_type)
        nod = med_cell_type_group["NOD"]
        n_cells = nod.attrs["NBR"]
        # MED connectivity is 1-based and stored column-major.
        cells += [(cell_type, nod[()].reshape(n_cells, -1, order="F") - 1)]

        # Cell tags (family ids per cell, one array per cell block)
        if "FAM" in med_cell_type_group:
            tags = med_cell_type_group["FAM"][()]
            if "cell_tags" not in cell_data:
                cell_data["cell_tags"] = []
            cell_data["cell_tags"].append(tags)

    # Information for cell tags
    cell_tags = {}
    if "ELEME" in fas:
        cell_tags = _read_families(fas["ELEME"])

    # Read nodal and cell data if they exist
    try:
        fields = f["CHA"]  # champs (fields) in French
    except KeyError:
        pass
    else:
        profiles = f["PROFILS"] if "PROFILS" in f else None
        _read_data(fields, profiles, cell_types, point_data, cell_data, field_data)

    # Construct the mesh object
    mesh = Mesh(
        points, cells, point_data=point_data, cell_data=cell_data, field_data=field_data
    )
    mesh.point_tags = point_tags
    mesh.cell_tags = cell_tags
    return mesh
def _read_data(fields, profiles, cell_types, point_data, cell_data, field_data):
    # Fill point_data/cell_data/field_data in place from the MED "CHA"
    # (champs = fields) group.  Multi-time-step fields get the step index and
    # time appended to the field name.
    for name, data in fields.items():
        if "NOM" in data.attrs:
            # Space-separated component names of the field.
            if "med:nom" not in field_data:
                field_data["med:nom"] = []
            field_data["med:nom"].append(data.attrs["NOM"].decode().split())

        time_step = sorted(data.keys())  # associated time-steps
        if len(time_step) == 1:  # single time-step
            names = [name]  # do not change field name
        else:  # many time-steps
            names = [None] * len(time_step)
            for i, key in enumerate(time_step):
                t = data[key].attrs["PDT"]  # current time
                names[i] = name + f"[{i:d}] - {t:g}"

        # MED field can contain multiple types of data
        for i, key in enumerate(time_step):
            med_data = data[key]  # at a particular time step
            name = names[i]
            for supp in med_data:
                if supp == "NOE":  # continuous nodal (NOEU) data
                    point_data[name] = _read_nodal_data(med_data, profiles)
                else:  # Gauss points (ELGA) or DG (ELNO) data
                    # supp looks like "<something>.<MED type>"; the suffix
                    # after the dot identifies the cell type.
                    cell_type = med_to_meshio_type[supp.partition(".")[2]]
                    assert cell_type in cell_types
                    cell_index = cell_types.index(cell_type)
                    if name not in cell_data:
                        cell_data[name] = [None] * len(cell_types)
                    cell_data[name][cell_index] = _read_cell_data(
                        med_data[supp], profiles
                    )
def _read_nodal_data(med_data, profiles):
profile = med_data["NOE"].attrs["PFL"]
data_profile = med_data["NOE"][profile]
n_points = data_profile.attrs["NBR"]
if profile.decode() == "MED_NO_PROFILE_INTERNAL": # default profile with everything
values = data_profile["CO"][()].reshape(n_points, -1, order="F")
else:
n_data = profiles[profile].attrs["NBR"]
index_profile = profiles[profile]["PFL"][()] - 1
values_profile = data_profile["CO"][()].reshape(n_data, -1, order="F")
values = np.full((n_points, values_profile.shape[1]), np.nan)
values[index_profile] = values_profile
if values.shape[-1] == 1: # cut off for scalars
values = values[:, 0]
return values
def _read_cell_data(med_data, profiles):
    # Read one cell field (ELGA/ELNO data).  The raw array has shape
    # (n_cells, n_gauss_points, n_components); singleton Gauss-point and
    # component dimensions are squeezed away before returning.
    profile = med_data.attrs["PFL"]
    data_profile = med_data[profile]
    n_cells = data_profile.attrs["NBR"]
    n_gauss_points = data_profile.attrs["NGA"]
    if profile.decode() == "MED_NO_PROFILE_INTERNAL":  # default profile with everything
        values = data_profile["CO"][()].reshape(n_cells, n_gauss_points, -1, order="F")
    else:
        # Partial field: scatter the profile's values into a NaN-filled array
        # at the profile's 1-based indices.
        n_data = profiles[profile].attrs["NBR"]
        index_profile = profiles[profile]["PFL"][()] - 1
        values_profile = data_profile["CO"][()].reshape(
            n_data, n_gauss_points, -1, order="F"
        )
        values = np.full(
            (n_cells, values_profile.shape[1], values_profile.shape[2]), np.nan
        )
        values[index_profile] = values_profile

    # Only 1 data point per cell, shape -> (n_cells, n_components)
    if n_gauss_points == 1:
        values = values[:, 0, :]
        if values.shape[-1] == 1:  # cut off for scalars
            values = values[:, 0]
    return values
def _read_families(fas_data):
families = {}
for _, node_set in fas_data.items():
set_id = node_set.attrs["NUM"] # unique set id
n_subsets = node_set["GRO"].attrs["NBR"] # number of subsets
nom_dataset = node_set["GRO"]["NOM"][()] # (n_subsets, 80) of int8
name = [None] * n_subsets
for i in range(n_subsets):
name[i] = "".join([chr(x) for x in nom_dataset[i]]).strip().rstrip("\x00")
families[set_id] = name
return families
def write(filename, mesh):
    """Write `mesh` to `filename` in the MED (HDF5-based) format.

    Layout written:
    - INFOS_GENERALES: format version (pinned to 3.0.0 for SALOME)
    - ENS_MAA/mesh: points, cells, one (timeless) time step
    - FAS/mesh: point/cell tag families
    - CHA: nodal and cell data fields
    """
    import h5py
    # MED doesn't support compression,
    #
    # compression = None
    f = h5py.File(filename, "w")
    # Strangely the version must be 3.0.x
    # Any version >= 3.1.0 will NOT work with SALOME 8.3
    info = f.create_group("INFOS_GENERALES")
    info.attrs.create("MAJ", 3)
    info.attrs.create("MIN", 0)
    info.attrs.create("REL", 0)
    # Meshes
    mesh_ensemble = f.create_group("ENS_MAA")
    mesh_name = "mesh"
    med_mesh = mesh_ensemble.create_group(mesh_name)
    med_mesh.attrs.create("DIM", mesh.points.shape[1])  # mesh dimension
    med_mesh.attrs.create("ESP", mesh.points.shape[1])  # spatial dimension
    med_mesh.attrs.create("REP", 0)  # cartesian coordinate system (repère in French)
    med_mesh.attrs.create("UNT", numpy_void_str)  # time unit
    med_mesh.attrs.create("UNI", numpy_void_str)  # spatial unit
    med_mesh.attrs.create("SRT", 1)  # sorting type MED_SORT_ITDT
    # component names: each padded to the fixed MED width of 16
    names = ["X", "Y", "Z"][: mesh.points.shape[1]]
    med_mesh.attrs.create("NOM", np.bytes_("".join(f"{name:<16}" for name in names)))
    med_mesh.attrs.create("DES", np.bytes_("Mesh created with meshio"))
    med_mesh.attrs.create("TYP", 0)  # mesh type (MED_NON_STRUCTURE)
    # Time-step
    step = "-0000000000000000001-0000000000000000001"  # NDT NOR
    time_step = med_mesh.create_group(step)
    time_step.attrs.create("CGT", 1)
    time_step.attrs.create("NDT", -1)  # no time step (-1)
    time_step.attrs.create("NOR", -1)  # no iteration step (-1)
    time_step.attrs.create("PDT", -1.0)  # current time
    # Points
    nodes_group = time_step.create_group("NOE")
    nodes_group.attrs.create("CGT", 1)
    nodes_group.attrs.create("CGS", 1)
    profile = "MED_NO_PROFILE_INTERNAL"
    nodes_group.attrs.create("PFL", np.bytes_(profile))
    # coordinates are stored column-major (Fortran order)
    coo = nodes_group.create_dataset("COO", data=mesh.points.flatten(order="F"))
    coo.attrs.create("CGT", 1)
    coo.attrs.create("NBR", len(mesh.points))
    # Point tags
    if "point_tags" in mesh.point_data:  # only works for med -> med
        family = nodes_group.create_dataset("FAM", data=mesh.point_data["point_tags"])
        family.attrs.create("CGT", 1)
        family.attrs.create("NBR", len(mesh.points))
    # Cells (mailles in French)
    if len(mesh.cells) != len(np.unique([c.type for c in mesh.cells])):
        raise WriteError("MED files cannot have two sections of the same cell type.")
    cells_group = time_step.create_group("MAI")
    cells_group.attrs.create("CGT", 1)
    for k, cell_block in enumerate(mesh.cells):
        cell_type = cell_block.type
        cells = cell_block.data
        med_type = meshio_to_med_type[cell_type]
        med_cells = cells_group.create_group(med_type)
        med_cells.attrs.create("CGT", 1)
        med_cells.attrs.create("CGS", 1)
        med_cells.attrs.create("PFL", np.bytes_(profile))
        # connectivity: column-major and 1-based in MED
        nod = med_cells.create_dataset("NOD", data=cells.flatten(order="F") + 1)
        nod.attrs.create("CGT", 1)
        nod.attrs.create("NBR", len(cells))
        # Cell tags
        if "cell_tags" in mesh.cell_data:  # works only for med -> med
            family = med_cells.create_dataset(
                "FAM", data=mesh.cell_data["cell_tags"][k]
            )
            family.attrs.create("CGT", 1)
            family.attrs.create("NBR", len(cells))
    # Information about point and cell sets (familles in French)
    fas = f.create_group("FAS")
    families = fas.create_group(mesh_name)
    family_zero = families.create_group("FAMILLE_ZERO")  # must be defined in any case
    family_zero.attrs.create("NUM", 0)
    # For point tags
    try:
        if len(mesh.point_tags) > 0:
            node = families.create_group("NOEUD")
            _write_families(node, mesh.point_tags)
    except AttributeError:
        # mesh has no point_tags attribute; nothing to write
        pass
    # For cell tags
    try:
        if len(mesh.cell_tags) > 0:
            element = families.create_group("ELEME")
            _write_families(element, mesh.cell_tags)
    except AttributeError:
        # mesh has no cell_tags attribute; nothing to write
        pass
    # Write nodal/cell data
    fields = f.create_group("CHA")
    name_idx = 0
    field_names = mesh.field_data["med:nom"] if "med:nom" in mesh.field_data else []
    # Nodal data
    for name, data in mesh.point_data.items():
        if name == "point_tags":  # ignore point_tags already written under FAS
            continue
        supp = "NOEU"  # nodal data
        # NOTE(review): assumes field_names, when present, has one entry per
        # written field in iteration order -- an IndexError otherwise; confirm
        # against how "med:nom" is populated on read
        field_name = field_names[name_idx] if field_names else None
        name_idx += 1
        _write_data(fields, mesh_name, field_name, profile, name, supp, data)
    # Cell data
    # Only support writing ELEM fields with only 1 Gauss point per cell
    # Or ELNO (DG) fields defined at every node per cell
    for name, d in mesh.cell_data.items():
        if name == "cell_tags":  # ignore cell_tags already written under FAS
            continue
        for cell, data in zip(mesh.cells, d):
            # Determine the nature of the cell data
            # Either shape = (n_data, ) or (n_data, n_components) -> ELEM
            # or shape = (n_data, n_gauss_points, n_components) -> ELNO or ELGA
            med_type = meshio_to_med_type[cell.type]
            if data.ndim <= 2:
                supp = "ELEM"
            elif data.shape[1] == num_nodes_per_cell[cell.type]:
                supp = "ELNO"
            else:  # general ELGA data defined at unknown Gauss points
                supp = "ELGA"
            # same field name for every cell block of this data array
            field_name = field_names[name_idx] if field_names else None
            _write_data(
                fields,
                mesh_name,
                field_name,
                profile,
                name,
                supp,
                data,
                med_type,
            )
        name_idx += 1
def _write_data(
    fields,
    mesh_name,
    field_name,
    profile,
    name,
    supp,
    data,
    med_type=None,
):
    """Write one data array under the CHA group.

    `supp` selects the MED support: "NOEU" (nodal), "ELNO" (per cell node)
    or "ELEM" (one value per cell); `med_type` is the MED cell type name and
    is required for cell data.  General "ELGA" fields (data at arbitrary
    Gauss points) are not supported and silently skipped.
    """
    # Skip for general ELGA fields defined at unknown Gauss points
    if supp == "ELGA":
        return
    # Field
    try:  # a same MED field may contain fields of different natures
        field = fields.create_group(name)
        field.attrs.create("MAI", np.bytes_(mesh_name))
        field.attrs.create("TYP", 6)  # MED_FLOAT64
        field.attrs.create("UNI", numpy_void_str)  # physical unit
        field.attrs.create("UNT", numpy_void_str)  # time unit
        n_components = 1 if data.ndim == 1 else data.shape[-1]
        field.attrs.create("NCO", n_components)  # number of components
        # names = _create_component_names(n_components)
        # field.attrs.create("NOM", np.bytes_("".join(f"{name:<16}" for name in names)))
        # component names, each padded to the fixed MED width of 16
        if field_name:
            field.attrs.create(
                "NOM", np.bytes_("".join(f"{name:<16}" for name in field_name))
            )
        else:
            field.attrs.create("NOM", np.bytes_(f"{'':<16}"))
        # Time-step
        step = "0000000000000000000100000000000000000001"
        time_step = field.create_group(step)
        time_step.attrs.create("NDT", 1)  # time step 1
        time_step.attrs.create("NOR", 1)  # iteration step 1
        time_step.attrs.create("PDT", 0.0)  # current time
        time_step.attrs.create("RDT", -1)  # NDT of the mesh
        time_step.attrs.create("ROR", -1)  # NOR of the mesh
    except ValueError:  # name already exists; reuse its latest time step
        field = fields[name]
        ts_name = list(field.keys())[-1]
        time_step = field[ts_name]
    # Field information
    if supp == "NOEU":
        typ = time_step.create_group("NOE")
    elif supp == "ELNO":
        typ = time_step.create_group("NOE." + med_type)
    else:  # 'ELEM' with only 1 Gauss points!
        typ = time_step.create_group("MAI." + med_type)
    typ.attrs.create("GAU", numpy_void_str)  # no associated Gauss points
    typ.attrs.create("PFL", np.bytes_(profile))
    # NOTE: `profile` is rebound from the profile *name* to the profile group
    profile = typ.create_group(profile)
    profile.attrs.create("NBR", len(data))  # number of data
    if supp == "ELNO":
        profile.attrs.create("NGA", data.shape[1])  # Gauss points = cell nodes
    else:
        profile.attrs.create("NGA", 1)
    profile.attrs.create("GAU", numpy_void_str)
    # Dataset; values stored column-major (Fortran order)
    profile.create_dataset("CO", data=data.flatten(order="F"))
def _create_component_names(n_components):
"""To be correctly read in a MED viewer, each component must be a string of width
16. Since we do not know the physical nature of the data, we just use V1, V2,...
"""
return [f"V{(i+1)}" for i in range(n_components)]
def _family_name(set_id, name):
"""Return the FAM object name corresponding to the unique set id and a list of
subset names
"""
return "FAM" + "_" + str(set_id) + "_" + "_".join(name)
def _write_families(fm_group, tags):
    """Write point/cell tag information under FAS/[mesh_name].

    `tags` maps a set id to its list of subset names; each becomes a FAM
    group whose GRO/NOM dataset holds the names as fixed 80-byte records.
    """
    for set_id, subset_names in tags.items():
        family = fm_group.create_group(_family_name(set_id, subset_names))
        family.attrs.create("NUM", set_id)
        group = family.create_group("GRO")
        group.attrs.create("NBR", len(subset_names))  # number of subsets
        dataset = group.create_dataset("NOM", (len(subset_names),), dtype="80int8")
        for i, subset_name in enumerate(subset_names):
            # pad the name to the fixed 80-character record width
            padded = subset_name + "\x00" * (80 - len(subset_name))
            # Needs numpy array, see
            dataset[i] = np.array([ord(x) for x in padded])
# Register the MED reader/writer with meshio's central format registry.
register_format("med", [".med"], read, {"med": write})
src/meshio/medit/ 0000775 0000000 0000000 00000000000 14562440725 0014244 5 ustar 00root root 0000000 0000000 src/meshio/medit/__init__.py 0000664 0000000 0000000 00000000075 14562440725 0016357 0 ustar 00root root 0000000 0000000 from ._medit import read, write
__all__ = ["read", "write"]
src/meshio/medit/_medit.py 0000664 0000000 0000000 00000041371 14562440725 0016065 0 ustar 00root root 0000000 0000000 """
I/O for Medit's format/Gamma Mesh Format,
Latest official up-to-date documentation and a reference C implementation at
https://github.com/LoicMarechal/libMeshb
"""
import struct
from ctypes import c_double, c_float
import numpy as np
from .._common import _pick_first_int_data, warn
from .._exceptions import ReadError
from .._files import open_file
from .._helpers import register_format
from .._mesh import Mesh
from ._medit_internal import medit_codes
def read(filename):
    """Read a Medit mesh; a trailing "b" in the filename selects binary."""
    with open_file(filename) as f:
        if str(filename)[-1] == "b":
            return read_binary_buffer(f)
        return read_ascii_buffer(f)
def _produce_dtype(string_type, dim, itype, ftype):
"""
convert a medit_code to a dtype appropriate for building a numpy array
"""
res = ""
c = 0
while c < len(string_type):
s = string_type[c]
if s == "i":
res += itype
elif s == "r":
res += ftype
elif s == "d":
res += str(dim)
c += 1
continue
else:
raise ReadError("Invalid string type")
c += 1
if c != len(string_type):
res += ","
return res
def read_binary_buffer(f):
    """Read a mesh from an open binary Medit (.meshb) file object.

    The header determines endianness (magic code 1 vs. byte-swapped
    16777216), the file version (which fixes the integer/float/position
    widths) and the dimension; the body is then a sequence of keyword fields.

    Fix: the endianness setup, the version read and the version range check
    had been destroyed by a garbled line (several source lines fused into
    one); they are restored here.
    """
    meshio_from_medit = {
        "GmfVertices": ("point", None),
        "GmfEdges": ("line", 2),
        "GmfTriangles": ("triangle", 3),
        "GmfQuadrilaterals": ("quad", 4),
        "GmfTetrahedra": ("tetra", 4),
        "GmfPrisms": ("wedge", 6),
        "GmfPyramids": ("pyramid", 5),
        "GmfHexahedra": ("hexahedron", 8),
    }
    dim = 0
    points = None
    cells = []
    point_data = {}
    cell_data = {"medit:ref": []}
    itype = ""
    ftype = ""
    postype = ""
    # keywords are always stored as 32-bit integers
    keytype = "i4"
    # magic code: 1 when the file's endianness matches, 16777216 (1 with
    # swapped bytes) otherwise
    code = np.fromfile(f, count=1, dtype=keytype).item()
    if code != 1 and code != 16777216:
        raise ReadError("Invalid code")
    if code == 16777216:
        # swap endianness: prefix every dtype with the non-native byte order
        swapped = ">" if struct.unpack("=l", struct.pack("<l", 1))[0] == 1 else "<"
        itype += swapped
        ftype += swapped
        postype += swapped
        keytype = swapped + keytype
    # the file version
    version = np.fromfile(f, count=1, dtype=keytype).item()
    if version < 1 or version > 4:
        raise ReadError("Invalid version")
    # version fixes the widths of integers, floats and file positions
    if version == 1:
        itype += "i4"
        ftype += "f4"
        postype += "i4"
    elif version == 2:
        itype += "i4"
        ftype += "f8"
        postype += "i4"
    elif version == 3:
        itype += "i4"
        ftype += "f8"
        postype += "i8"
    else:
        itype += "i8"
        ftype += "f8"
        postype += "i8"
    field = np.fromfile(f, count=1, dtype=keytype).item()
    if field != 3:  # = GmfDimension
        raise ReadError("Invalid dimension code : " + str(field) + " it should be 3")
    np.fromfile(f, count=1, dtype=postype)  # discard the next-field position
    dim = np.fromfile(f, count=1, dtype=keytype).item()
    if dim != 2 and dim != 3:
        raise ReadError("Invalid mesh dimension : " + str(dim))
    while True:
        field = np.fromfile(f, count=1, dtype=keytype)
        if field.size == 0:
            msg = "End-of-file reached before GmfEnd keyword"
            warn(msg)
            break
        field = field.item()
        if field not in medit_codes.keys():
            raise ReadError("Unsupported field")
        field_code = medit_codes[field]
        if field_code[0] == "GmfEnd":
            break
        if field_code[0] == "GmfReserved":
            continue
        np.fromfile(f, count=1, dtype=postype)  # discard the next-field position
        nitems = 1
        if field_code[1] == "i":
            # a record count precedes the data
            nitems = np.fromfile(f, count=1, dtype=itype).item()
        field_template = field_code[2]
        # NOTE(review): templates containing codes other than i/r/d (e.g.
        # "sr" solution fields) make _produce_dtype raise before the
        # unknown-field check below is reached -- confirm intended
        dtype = np.dtype(_produce_dtype(field_template, dim, itype, ftype))
        out = np.asarray(np.fromfile(f, count=nitems, dtype=dtype))
        if field_code[0] not in meshio_from_medit.keys():
            warn(f"meshio doesn't know {field_code[0]} type. Skipping.")
            continue
        elif field_code[0] == "GmfVertices":
            points = out["f0"]
            point_data["medit:ref"] = out["f1"]
        else:
            meshio_type, ncols = meshio_from_medit[field_code[0]]
            # transform the structured array to integer array which suffices
            # for the cell connectivity
            out_view = out.view(itype).reshape(nitems, ncols + 1)
            cells.append((meshio_type, out_view[:, :ncols] - 1))
            cell_data["medit:ref"].append(out_view[:, -1])
    return Mesh(points, cells, point_data=point_data, cell_data=cell_data)
def read_ascii_buffer(f):
    """Read a mesh from an open ASCII Medit (.mesh) file object.

    The file is a sequence of keyword sections.  Vertices and the known cell
    sections are collected; geometry-bookkeeping sections are read and
    discarded; a few known-but-unsupported keywords are skipped with a
    warning.  Raises ReadError on malformed input or an unknown keyword.
    """
    dim = 0
    cells = []
    point_data = {}
    cell_data = {"medit:ref": []}
    meshio_from_medit = {
        "Edges": ("line", 2),
        "Triangles": ("triangle", 3),
        "Quadrilaterals": ("quad", 4),
        "Tetrahedra": ("tetra", 4),
        "Prisms": ("wedge", 6),
        "Pyramids": ("pyramid", 5),
        "Hexahedra": ("hexahedron", 8),  # Frey
        "Hexaedra": ("hexahedron", 8),  # Dobrzynski
    }
    points = None
    dtype = None
    while True:
        line = f.readline()
        if not line:
            # EOF
            break
        line = line.strip()
        if len(line) == 0 or line[0] == "#":
            continue
        items = line.split()
        if not items[0].isalpha():
            raise ReadError()
        if items[0] == "MeshVersionFormatted":
            version = items[1]
            # version selects the float width of the coordinates
            dtype = {"0": c_float, "1": c_float, "2": c_double}[version]
        elif items[0] == "Dimension":
            if len(items) >= 2:
                dim = int(items[1])
            else:
                dim = int(
                    int(f.readline())
                )  # e.g. Dimension\n3, where the number of dimensions is on the next line
        elif items[0] == "Vertices":
            if dim <= 0:
                raise ReadError()
            if dtype is None:
                raise ReadError("Expected `MeshVersionFormatted` before `Vertices`")
            num_verts = int(f.readline())
            # each row: dim coordinates followed by an integer reference label
            out = np.fromfile(
                f, count=num_verts * (dim + 1), dtype=dtype, sep=" "
            ).reshape(num_verts, dim + 1)
            points = out[:, :dim]
            point_data["medit:ref"] = out[:, dim].astype(int)
        elif items[0] in meshio_from_medit:
            meshio_type, points_per_cell = meshio_from_medit[items[0]]
            # The first value is the number of elements
            num_cells = int(f.readline())
            out = np.fromfile(
                f, count=num_cells * (points_per_cell + 1), dtype=int, sep=" "
            ).reshape(num_cells, points_per_cell + 1)
            # adapt for 0-base
            cells.append((meshio_type, out[:, :points_per_cell] - 1))
            cell_data["medit:ref"].append(out[:, -1])
        elif items[0] == "Corners":
            # those are just discarded
            num_corners = int(f.readline())
            np.fromfile(f, count=num_corners, dtype=dtype, sep=" ")
        elif items[0] == "Normals":
            # those are just discarded
            num_normals = int(f.readline())
            np.fromfile(f, count=num_normals * dim, dtype=dtype, sep=" ").reshape(
                num_normals, dim
            )
        elif items[0] == "NormalAtVertices":
            # those are just discarded
            num_normal_at_vertices = int(f.readline())
            np.fromfile(
                f, count=num_normal_at_vertices * 2, dtype=int, sep=" "
            ).reshape(num_normal_at_vertices, 2)
        elif items[0] == "SubDomainFromMesh":
            # those are just discarded
            num_sub_domain_from_mesh = int(f.readline())
            np.fromfile(
                f, count=num_sub_domain_from_mesh * 4, dtype=int, sep=" "
            ).reshape(num_sub_domain_from_mesh, 4)
        elif items[0] == "VertexOnGeometricVertex":
            # those are just discarded
            num_vertex_on_geometric_vertex = int(f.readline())
            np.fromfile(
                f, count=num_vertex_on_geometric_vertex * 2, dtype=int, sep=" "
            ).reshape(num_vertex_on_geometric_vertex, 2)
        elif items[0] == "VertexOnGeometricEdge":
            # those are just discarded
            num_vertex_on_geometric_edge = int(f.readline())
            np.fromfile(
                f, count=num_vertex_on_geometric_edge * 3, dtype=float, sep=" "
            ).reshape(num_vertex_on_geometric_edge, 3)
        elif items[0] == "EdgeOnGeometricEdge":
            # those are just discarded
            num_edge_on_geometric_edge = int(f.readline())
            np.fromfile(
                f, count=num_edge_on_geometric_edge * 2, dtype=int, sep=" "
            ).reshape(num_edge_on_geometric_edge, 2)
        elif items[0] == "Identifier" or items[0] == "Geometry":
            # single-line payload; discard it
            f.readline()
        elif items[0] in [
            "RequiredVertices",
            "TangentAtVertices",
            "Tangents",
            "Ridges",
        ]:
            # known keywords meshio does not handle: skip their records
            msg = f"Meshio doesn't know keyword {items[0]}. Skipping."
            warn(msg)
            num_to_pass = int(f.readline())
            for _ in range(num_to_pass):
                f.readline()
        else:
            if items[0] != "End":
                raise ReadError(f"Unknown keyword '{items[0]}'.")
    if points is None:
        raise ReadError("Expected `Vertices`")
    return Mesh(points, cells, point_data=point_data, cell_data=cell_data)
def write(filename, mesh, float_fmt=".16e"):
    """Write a Medit mesh; a trailing "b" in the filename selects binary."""
    if str(filename)[-1] == "b":
        write_binary_file(filename, mesh)
        return
    write_ascii_file(filename, mesh, float_fmt)
def write_ascii_file(filename, mesh, float_fmt=".16e"):
    """Write `mesh` to `filename` in the ASCII Medit format.

    Medit stores exactly one integer label per point and per cell (the
    "reference"); the first integer point/cell data array is used for it and
    any further arrays are skipped with a warning.  Unknown cell types are
    skipped with a warning.
    """
    with open_file(filename, "wb") as fh:
        # format version is derived from the coordinate float width
        version = {np.dtype(c_float): 1, np.dtype(c_double): 2}[mesh.points.dtype]
        # N. B.: PEP 461 Adding % formatting to bytes and bytearray
        fh.write(f"MeshVersionFormatted {version}\n".encode())
        n, d = mesh.points.shape
        fh.write(f"Dimension {d}\n".encode())
        # vertices
        fh.write(b"\nVertices\n")
        fh.write(f"{n}\n".encode())
        # pick out point data
        labels_key, other = _pick_first_int_data(mesh.point_data)
        if labels_key and other:
            string = ", ".join(other)
            warn(
                "Medit can only write one point data array. "
                f"Picking {labels_key}, skipping {string}."
            )
        # default reference label is 1 when no integer point data exists
        labels = mesh.point_data[labels_key] if labels_key else np.ones(n, dtype=int)
        fmt = " ".join(["{:" + float_fmt + "}"] * d) + " {:d}\n"
        for x, label in zip(mesh.points, labels):
            fh.write(fmt.format(*x, label).encode())
        medit_from_meshio = {
            "line": ("Edges", 2),
            "triangle": ("Triangles", 3),
            "quad": ("Quadrilaterals", 4),
            "tetra": ("Tetrahedra", 4),
            "wedge": ("Prisms", 6),
            "pyramid": ("Pyramids", 5),
            "hexahedron": ("Hexahedra", 8),
        }
        # pick out cell_data
        labels_key, other = _pick_first_int_data(mesh.cell_data)
        if labels_key and other:
            string = ", ".join(other)
            warn(
                "Medit can only write one cell data array. "
                f"Picking {labels_key}, skipping {string}."
            )
        for k, cell_block in enumerate(mesh.cells):
            cell_type = cell_block.type
            data = cell_block.data
            try:
                medit_name, num = medit_from_meshio[cell_type]
            except KeyError:
                msg = f"MEDIT's mesh format doesn't know {cell_type} cells. Skipping."
                warn(msg)
                continue
            fh.write(b"\n")
            fh.write(f"{medit_name}\n".encode())
            fh.write(f"{len(data)}\n".encode())
            # pick out cell data
            labels = (
                mesh.cell_data[labels_key][k]
                if labels_key
                else np.ones(len(data), dtype=data.dtype)
            )
            fmt = " ".join(["{:d}"] * (num + 1)) + "\n"
            # adapt 1-base
            for d, label in zip(data + 1, labels):
                fh.write(fmt.format(*d, label).encode())
        fh.write(b"\nEnd\n")
def write_binary_file(f, mesh):
    """Write `mesh` to `f` in the binary Medit (.meshb) format.

    Writes file version 3 (32-bit integers, 64-bit floats and positions) and
    upgrades to version 4 (64-bit integers) when any cell connectivity uses
    64-bit integers.  Each field header carries the absolute file position of
    the *next* field, so `pos` is accumulated as fields are written.
    """
    with open_file(f, "wb") as fh:
        version = 3
        itype = "i4"
        postype = "i8"
        ftype = "f8"
        # according to manual keywords are always written as i4 independently of
        # the file version
        keytype = "i4"
        # if we store internally 64bit integers upgrade file version
        has_big_ints = False
        for cell_block in mesh.cells:
            if cell_block.data.dtype.itemsize == 8:
                has_big_ints = True
                break
        if has_big_ints:
            itype = "i8"
            version = 4
        itype_size = np.dtype(itype).itemsize
        ftype_size = np.dtype(ftype).itemsize
        postype_size = np.dtype(postype).itemsize
        keyword_size = np.dtype(keytype).itemsize
        code = 1  # magic code marking native endianness
        field = 3  # GmfDimension
        # position of the field following the header
        pos = 4 * keyword_size + postype_size
        num_verts, dim = mesh.points.shape
        # header: code, version, GmfDimension keyword, next position, dim
        header_type = np.dtype(",".join([keytype, keytype, keytype, postype, keytype]))
        tmp_array = np.empty(1, dtype=header_type)
        tmp_array["f0"] = code
        tmp_array["f1"] = version
        tmp_array["f2"] = field
        tmp_array["f3"] = pos
        tmp_array["f4"] = dim
        tmp_array.tofile(fh)
        # write points
        field = 4  # GmfVertices
        field_code = medit_codes[field]
        # advance pos past this field's payload (coords + labels) and header
        pos += num_verts * dim * ftype_size
        pos += num_verts * itype_size
        pos += keyword_size + postype_size + itype_size
        header_type = np.dtype(",".join([keytype, postype, itype]))
        tmp_array = np.empty(1, dtype=header_type)
        tmp_array["f0"] = field
        tmp_array["f1"] = pos
        tmp_array["f2"] = num_verts
        tmp_array.tofile(fh)
        field_template = field_code[2]
        dtype = np.dtype(_produce_dtype(field_template, dim, itype, ftype))
        labels_key, other = _pick_first_int_data(mesh.point_data)
        if labels_key and other:
            other_string = ", ".join(other)
            warn(
                "Medit can only write one point data array. "
                f"Picking {labels_key}, skipping {other_string}."
            )
        # default reference label is 1 when no integer point data exists
        labels = (
            mesh.point_data[labels_key]
            if labels_key
            else np.ones(num_verts, dtype=itype)
        )
        tmp_array = np.empty(num_verts, dtype=dtype)
        tmp_array["f0"] = mesh.points
        tmp_array["f1"] = labels
        tmp_array.tofile(fh)
        labels_key, other = _pick_first_int_data(mesh.cell_data)
        if labels_key and other:
            string = ", ".join(other)
            warn(
                "Medit can only write one cell data array. "
                f"Picking {labels_key}, skipping {string}."
            )
        # first component is medit keyword id see _medit_internal.py
        medit_from_meshio = {
            "line": 5,
            "triangle": 6,
            "quad": 7,
            "tetra": 8,
            "wedge": 9,
            "pyramid": 49,
            "hexahedron": 10,
        }
        for k, cell_block in enumerate(mesh.cells):
            try:
                medit_key = medit_from_meshio[cell_block.type]
            except KeyError:
                warn(
                    f"MEDIT's mesh format doesn't know {cell_block.type} cells. "
                    + "Skipping."
                )
                continue
            # NOTE: num_verts is rebound here to nodes-per-cell
            num_cells, num_verts = cell_block.data.shape
            pos += num_cells * (num_verts + 1) * itype_size
            pos += keyword_size + postype_size + itype_size
            header_type = np.dtype(",".join([keytype, postype, itype]))
            tmp_array = np.empty(1, dtype=header_type)
            tmp_array["f0"] = medit_key
            tmp_array["f1"] = pos
            tmp_array["f2"] = num_cells
            tmp_array.tofile(fh)
            # pick out cell data
            labels = (
                mesh.cell_data[labels_key][k]
                if labels_key
                else np.ones(len(cell_block.data), dtype=cell_block.data.dtype)
            )
            field_template = medit_codes[medit_key][2]
            dtype = np.dtype(_produce_dtype(field_template, dim, itype, ftype))
            tmp_array = np.empty(num_cells, dtype=dtype)
            i = 0
            # connectivity columns first (1-based), reference label last
            for col_type in dtype.names[:-1]:
                tmp_array[col_type] = cell_block.data[:, i] + 1
                i += 1
            tmp_array[dtype.names[-1]] = labels
            tmp_array.tofile(fh)
        pos = 0  # GmfEnd has no successor field
        field = 54  # GmfEnd
        header_type = np.dtype(",".join([keytype, postype]))
        tmp_array = np.empty(1, dtype=header_type)
        tmp_array["f0"] = field
        tmp_array["f1"] = pos
        tmp_array.tofile(fh)
# Register the Medit reader/writer with meshio's central format registry.
register_format("medit", [".mesh", ".meshb"], read, {"medit": write})
src/meshio/medit/_medit_internal.py 0000664 0000000 0000000 00000024427 14562440725 0017764 0 ustar 00root root 0000000 0000000 # Key is the enum value of each keyword
# values follow the design of GmfKwdFmt array of
# https://github.com/LoicMarechal/libMeshb/blob/master/sources/libmeshb7.c
# For each keyword we assign whether there is a counter associated with it (second column) and
# its format as a string of numbers
#
# i: integer, r: real (float), d: dimension multiplier applied to the next code
medit_codes = {
0: ("GmfReserved", "", ""),
1: ("GmfMeshVersionFormatted", "", "i"),
2: ("GmfReserved", "", ""),
3: ("GmfDimension", "", "i"),
4: ("GmfVertices", "i", "dri"),
5: ("GmfEdges", "i", "iii"),
6: ("GmfTriangles", "i", "iiii"),
7: ("GmfQuadrilaterals", "i", "iiiii"),
8: ("GmfTetrahedra", "i", "iiiii"),
9: ("GmfPrisms", "i", "iiiiiii"),
10: ("GmfHexahedra", "i", "iiiiiiiii"),
11: ("GmfReserved", "", ""),
12: ("GmfReserved", "", ""),
13: ("GmfCorners", "i", "i"),
14: ("GmfRidges", "i", "i"),
15: ("GmfRequiredVertices", "i", "i"),
16: ("GmfRequiredEdges", "i", "i"),
17: ("GmfRequiredTriangles", "i", "i"),
18: ("GmfRequiredQuadrilaterals", "i", "i"),
19: ("GmfTangentAtEdgeVertices", "i", "iii"),
20: ("GmfNormalAtVertices", "i", "ii"),
21: ("GmfNormalAtTriangleVertices", "i", "iii"),
22: ("GmfNormalAtQuadrilateralVertices", "i", "iiii"),
23: ("GmfAngleOfCornerBound", "", "r"),
24: ("GmfTrianglesP2", "i", "iiiiiii"),
25: ("GmfEdgesP2", "i", "iiii"),
26: ("GmfSolAtPyramids", "i", "sr"),
27: ("GmfQuadrilateralsQ2", "i", "iiiiiiiiii"),
28: ("GmfISolAtPyramids", "i", "iiiii"),
29: ("GmfSubDomainFromGeom", "i", "iii"),
30: ("GmfTetrahedraP2", "i", "iiiiiiiiiii"),
31: ("GmfFault_NearTri", "i", "i"),
32: ("GmfFault_Inter", "i", "i"),
33: ("GmfHexahedraQ2", "i", "iiiiiiiiiiiiiiiiiiiiiiiiiiii"),
34: ("GmfExtraVerticesAtEdges", "i", "in"),
35: ("GmfExtraVerticesAtTriangles", "i", "in"),
36: ("GmfExtraVerticesAtQuadrilaterals", "i", "in"),
37: ("GmfExtraVerticesAtTetrahedra", "i", "in"),
38: ("GmfExtraVerticesAtPrisms", "i", "in"),
39: ("GmfExtraVerticesAtHexahedra", "i", "in"),
40: ("GmfVerticesOnGeometricVertices", "i", "ii"),
41: ("GmfVerticesOnGeometricEdges", "i", "iirr"),
42: ("GmfVerticesOnGeometricTriangles", "i", "iirrr"),
43: ("GmfVerticesOnGeometricQuadrilaterals", "i", "iirrr"),
44: ("GmfEdgesOnGeometricEdges", "i", "ii"),
45: ("GmfFault_FreeEdge", "i", "i"),
46: ("GmfPolyhedra", "i", "iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii"),
47: ("GmfPolygons", "", "iiiiiiiii"),
48: ("GmfFault_Overlap", "i", "i"),
49: ("GmfPyramids", "i", "iiiiii"),
50: ("GmfBoundingBox", "", "drdr"),
51: ("GmfReserved", "", ""),
52: ("GmfPrivateTable", "i", "i"),
53: ("GmfFault_BadShape", "i", "i"),
54: ("GmfEnd", "", ""),
55: ("GmfTrianglesOnGeometricTriangles", "i", "ii"),
56: ("GmfTrianglesOnGeometricQuadrilaterals", "i", "ii"),
57: ("GmfQuadrilateralsOnGeometricTriangles", "i", "ii"),
58: ("GmfQuadrilateralsOnGeometricQuadrilaterals", "i", "ii"),
59: ("GmfTangents", "i", "dr"),
60: ("GmfNormals", "i", "dr"),
61: ("GmfTangentAtVertices", "i", "ii"),
62: ("GmfSolAtVertices", "i", "sr"),
63: ("GmfSolAtEdges", "i", "sr"),
64: ("GmfSolAtTriangles", "i", "sr"),
65: ("GmfSolAtQuadrilaterals", "i", "sr"),
66: ("GmfSolAtTetrahedra", "i", "sr"),
67: ("GmfSolAtPrisms", "i", "sr"),
68: ("GmfSolAtHexahedra", "i", "sr"),
69: ("GmfDSolAtVertices", "i", "sr"),
70: ("GmfISolAtVertices", "i", "i"),
71: ("GmfISolAtEdges", "i", "ii"),
72: ("GmfISolAtTriangles", "i", "iii"),
73: ("GmfISolAtQuadrilaterals", "i", "iiii"),
74: ("GmfISolAtTetrahedra", "i", "iiii"),
75: ("GmfISolAtPrisms", "i", "iiiiii"),
76: ("GmfISolAtHexahedra", "i", "iiiiiiii"),
77: ("GmfIterations", "", "i"),
78: ("GmfTime", "", "r"),
79: ("GmfFault_SmallTri", "i", "i"),
80: ("GmfCoarseHexahedra", "i", "i"),
81: ("GmfComments", "i", "c"),
82: ("GmfPeriodicVertices", "i", "ii"),
83: ("GmfPeriodicEdges", "i", "ii"),
84: ("GmfPeriodicTriangles", "i", "ii"),
85: ("GmfPeriodicQuadrilaterals", "i", "ii"),
86: ("GmfPrismsP2", "i", "iiiiiiiiiiiiiiiiiii"),
87: ("GmfPyramidsP2", "i", "iiiiiiiiiiiiiii"),
88: ("GmfQuadrilateralsQ3", "i", "iiiiiiiiiiiiiiiii"),
89: ("GmfQuadrilateralsQ4", "i", "iiiiiiiiiiiiiiiiiiiiiiiiii"),
90: ("GmfTrianglesP3", "i", "iiiiiiiiiii"),
91: ("GmfTrianglesP4", "i", "iiiiiiiiiiiiiiii"),
92: ("GmfEdgesP3", "i", "iiiii"),
93: ("GmfEdgesP4", "i", "iiiiii"),
94: ("GmfIRefGroups", "i", "ciii"),
95: ("GmfDRefGroups", "i", "iii"),
96: ("GmfTetrahedraP3", "i", "iiiiiiiiiiiiiiiiiiiii"),
97: ("GmfTetrahedraP4", "i", "iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii"),
98: (
"GmfHexahedraQ3",
"i",
"iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii",
),
99: (
"GmfHexahedraQ4",
"i",
"iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii",
),
100: ("GmfPyramidsP3", "i", "iiiiiiiiiiiiiiiiiiiiiiiiiiiiiii"),
101: (
"GmfPyramidsP4",
"i",
"iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii",
),
102: ("GmfPrismsP3", "i", "iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii"),
103: (
"GmfPrismsP4",
"i",
"iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii",
),
104: ("GmfHOSolAtEdgesP1", "i", "hr"),
105: ("GmfHOSolAtEdgesP2", "i", "hr"),
106: ("GmfHOSolAtEdgesP3", "i", "hr"),
107: ("GmfHOSolAtTrianglesP1", "i", "hr"),
108: ("GmfHOSolAtTrianglesP2", "i", "hr"),
109: ("GmfHOSolAtTrianglesP3", "i", "hr"),
110: ("GmfHOSolAtQuadrilateralsQ1", "i", "hr"),
111: ("GmfHOSolAtQuadrilateralsQ2", "i", "hr"),
112: ("GmfHOSolAtQuadrilateralsQ3", "i", "hr"),
113: ("GmfHOSolAtTetrahedraP1", "i", "hr"),
114: ("GmfHOSolAtTetrahedraP2", "i", "hr"),
115: ("GmfHOSolAtTetrahedraP3", "i", "hr"),
116: ("GmfHOSolAtPyramidsP1", "i", "hr"),
117: ("GmfHOSolAtPyramidsP2", "i", "hr"),
118: ("GmfHOSolAtPyramidsP3", "i", "hr"),
119: ("GmfHOSolAtPrismsP1", "i", "hr"),
120: ("GmfHOSolAtPrismsP2", "i", "hr"),
121: ("GmfHOSolAtPrismsP3", "i", "hr"),
122: ("GmfHOSolAtHexahedraQ1", "i", "hr"),
123: ("GmfHOSolAtHexahedraQ2", "i", "hr"),
124: ("GmfHOSolAtHexahedraQ3", "i", "hr"),
125: ("GmfBezierBasis", "", "i"),
126: ("GmfByteFlow", "i", "i"),
127: ("GmfEdgesP2Ordering", "i", "i"),
128: ("GmfEdgesP3Ordering", "i", "i"),
129: ("GmfTrianglesP2Ordering", "i", "iii"),
130: ("GmfTrianglesP3Ordering", "i", "iii"),
131: ("GmfQuadrilateralsQ2Ordering", "i", "ii"),
132: ("GmfQuadrilateralsQ3Ordering", "i", "ii"),
133: ("GmfTetrahedraP2Ordering", "i", "iiii"),
134: ("GmfTetrahedraP3Ordering", "i", "iiii"),
135: ("GmfPyramidsP2Ordering", "i", "iii"),
136: ("GmfPyramidsP3Ordering", "i", "iii"),
137: ("GmfPrismsP2Ordering", "i", "iiii"),
138: ("GmfPrismsP3Ordering", "i", "iiii"),
139: ("GmfHexahedraQ2Ordering", "i", "iii"),
140: ("GmfHexahedraQ3Ordering", "i", "iii"),
141: ("GmfEdgesP1Ordering", "i", "i"),
142: ("GmfEdgesP4Ordering", "i", "i"),
143: ("GmfTrianglesP1Ordering", "i", "iii"),
144: ("GmfTrianglesP4Ordering", "i", "iii"),
145: ("GmfQuadrilateralsQ1Ordering", "i", "ii"),
146: ("GmfQuadrilateralsQ4Ordering", "i", "ii"),
147: ("GmfTetrahedraP1Ordering", "i", "iiii"),
148: ("GmfTetrahedraP4Ordering", "i", "iiii"),
149: ("GmfPyramidsP1Ordering", "i", "iii"),
150: ("GmfPyramidsP4Ordering", "i", "iii"),
151: ("GmfPrismsP1Ordering", "i", "iiii"),
152: ("GmfPrismsP4Ordering", "i", "iiii"),
153: ("GmfHexahedraQ1Ordering", "i", "iii"),
154: ("GmfHexahedraQ4Ordering", "i", "iii"),
155: ("GmfFloatingPointPrecision", "", "i"),
156: ("GmfHOSolAtEdgesP4", "i", "hr"),
157: ("GmfHOSolAtTrianglesP4", "i", "hr"),
158: ("GmfHOSolAtQuadrilateralsQ4", "i", "hr"),
159: ("GmfHOSolAtTetrahedraP4", "i", "hr"),
160: ("GmfHOSolAtPyramidsP4", "i", "hr"),
161: ("GmfHOSolAtPrismsP4", "i", "hr"),
162: ("GmfHOSolAtHexahedraQ4", "i", "hr"),
163: ("GmfHOSolAtEdgesP1NodesPositions", "i", "rr"),
164: ("GmfHOSolAtEdgesP2NodesPositions", "i", "rr"),
165: ("GmfHOSolAtEdgesP3NodesPositions", "i", "rr"),
166: ("GmfHOSolAtEdgesP4NodesPositions", "i", "rr"),
167: ("GmfHOSolAtTrianglesP1NodesPositions", "i", "rrr"),
168: ("GmfHOSolAtTrianglesP2NodesPositions", "i", "rrr"),
169: ("GmfHOSolAtTrianglesP3NodesPositions", "i", "rrr"),
170: ("GmfHOSolAtTrianglesP4NodesPositions", "i", "rrr"),
171: ("GmfHOSolAtQuadrilateralsQ1NodesPositions", "i", "rr"),
172: ("GmfHOSolAtQuadrilateralsQ2NodesPositions", "i", "rr"),
173: ("GmfHOSolAtQuadrilateralsQ3NodesPositions", "i", "rr"),
174: ("GmfHOSolAtQuadrilateralsQ4NodesPositions", "i", "rr"),
175: ("GmfHOSolAtTetrahedraP1NodesPositions", "i", "rrrr"),
176: ("GmfHOSolAtTetrahedraP2NodesPositions", "i", "rrrr"),
177: ("GmfHOSolAtTetrahedraP3NodesPositions", "i", "rrrr"),
178: ("GmfHOSolAtTetrahedraP4NodesPositions", "i", "rrrr"),
179: ("GmfHOSolAtPyramidsP1NodesPositions", "i", "rrr"),
180: ("GmfHOSolAtPyramidsP2NodesPositions", "i", "rrr"),
181: ("GmfHOSolAtPyramidsP3NodesPositions", "i", "rrr"),
182: ("GmfHOSolAtPyramidsP4NodesPositions", "i", "rrr"),
183: ("GmfHOSolAtPrismsP1NodesPositions", "i", "rrrr"),
184: ("GmfHOSolAtPrismsP2NodesPositions", "i", "rrrr"),
185: ("GmfHOSolAtPrismsP3NodesPositions", "i", "rrrr"),
186: ("GmfHOSolAtPrismsP4NodesPositions", "i", "rrrr"),
187: ("GmfHOSolAtHexahedraQ1NodesPositions", "i", "rrr"),
188: ("GmfHOSolAtHexahedraQ2NodesPositions", "i", "rrr"),
189: ("GmfHOSolAtHexahedraQ3NodesPositions", "i", "rrr"),
190: ("GmfHOSolAtHexahedraQ4NodesPositions", "i", "rrr"),
191: ("GmfEdgesReferenceElement", "", "rr"),
192: ("GmfTriangleReferenceElement", "", "rrrrrr"),
193: ("GmfQuadrilateralReferenceElement", "", "rrrrrrrr"),
194: ("GmfTetrahedronReferenceElement", "", "rrrrrrrrrrrr"),
195: ("GmfPyramidReferenceElement", "", "rrrrrrrrrrrrrrr"),
196: ("GmfPrismReferenceElement", "", "rrrrrrrrrrrrrrrrrr"),
197: ("GmfHexahedronReferenceElement", "", "rrrrrrrrrrrrrrrrrrrrrrrr"),
198: ("GmfBoundaryLayers", "i", "iii"),
}
src/meshio/nastran/ 0000775 0000000 0000000 00000000000 14562440725 0014610 5 ustar 00root root 0000000 0000000 src/meshio/nastran/__init__.py 0000664 0000000 0000000 00000000077 14562440725 0016725 0 ustar 00root root 0000000 0000000 from ._nastran import read, write
__all__ = ["read", "write"]
src/meshio/nastran/_nastran.py 0000664 0000000 0000000 00000043227 14562440725 0016777 0 ustar 00root root 0000000 0000000 """
I/O for Nastran bulk data.
"""
from __future__ import annotations
import numpy as np
from ..__about__ import __version__
from .._common import num_nodes_per_cell, warn
from .._exceptions import ReadError
from .._files import open_file
from .._helpers import register_format
from .._mesh import CellBlock, Mesh
# Map from Nastran card name to meshio cell type.  Keys suffixed with "_"
# are fictive card names used internally for the second-order variants of
# CTETRA/CPYRA/CPENTA/CHEXA (Nastran reuses the same card name for both
# linear and quadratic elements).
nastran_to_meshio_type = {
    "CELAS1": "vertex",
    "CBEAM": "line",
    "CBUSH": "line",
    "CBUSH1D": "line",
    "CROD": "line",
    "CGAP": "line",
    "CBAR": "line",
    "CTRIAR": "triangle",
    "CTRIA3": "triangle",
    "CTRAX6": "triangle6",
    "CTRIAX6": "triangle6",
    "CTRIA6": "triangle6",
    "CQUADR": "quad",
    "CSHEAR": "quad",
    "CQUAD4": "quad",
    "CQUAD8": "quad8",
    "CQUAD9": "quad9",
    "CTETRA": "tetra",
    "CTETRA_": "tetra10",  # fictive
    "CPYRAM": "pyramid",
    "CPYRA": "pyramid",
    "CPYRA_": "pyramid13",  # fictive
    "CPENTA": "wedge",
    "CPENTA_": "wedge15",  # fictive
    "CHEXA": "hexahedron",
    "CHEXA_": "hexahedron20",  # fictive
}
# Nastran solid element card names.
nastran_solid_types = ["CTETRA", "CPYRA", "CPENTA", "CHEXA"]
# Inverse map; for meshio types with several Nastran cards (e.g. "line"),
# the last matching key in nastran_to_meshio_type wins.
meshio_to_nastran_type = {v: k for k, v in nastran_to_meshio_type.items()}
def read(filename):
    """Read a Nastran bulk data file and return the resulting mesh."""
    with open_file(filename, "r") as f:
        return read_buffer(f)
def read_buffer(f):
    """Parse an open Nastran bulk data file into a meshio Mesh.

    Handles free and fixed (small/large) field formats as well as manual
    (`+`/`*`) and automatic continuation lines.  The original Nastran ids
    are attached to the returned mesh as `points_id` / `cells_id`; the id
    found in the third field of each card is stored as "nastran:ref"
    point/cell data.
    """
    # Skip until BEGIN BULK
    while True:
        line = f.readline()
        if not line:
            raise RuntimeError('"BEGIN BULK" statement not found')
        if line.strip().startswith("BEGIN BULK"):
            break

    # Reading data
    points = []
    points_id = []
    cells = []
    cells_id = []
    cell = None
    point_refs = []
    cell_refs = []
    cell_ref = None

    def add_cell(nastran_type, cell, cell_ref):
        # NOTE: `cell_id` is read from the enclosing scope; the main loop
        # sets it right before calling this helper.
        cell_type = nastran_to_meshio_type[nastran_type]
        cell = list(map(int, cell))

        # Treat 2nd order CTETRA, CPYRA, CPENTA, CHEXA elements: they use the
        # same keyword as the linear elements, just with more nodes.
        if len(cell) > num_nodes_per_cell[cell_type]:
            assert cell_type in [
                "tetra",
                "pyramid",
                "wedge",
                "hexahedron",
            ], f"Illegal cell type {cell_type}"
            if cell_type == "tetra":
                cell_type = "tetra10"
                nastran_type = "CTETRA_"
            elif cell_type == "pyramid":
                cell_type = "pyramid13"
                nastran_type = "CPYRA_"
            elif cell_type == "wedge":
                cell_type = "wedge15"
                nastran_type = "CPENTA_"
            elif cell_type == "hexahedron":
                cell_type = "hexahedron20"
                nastran_type = "CHEXA_"

        cell = _convert_to_vtk_ordering(cell, nastran_type)

        # decide if we should append cell or start a new cell block
        if len(cells) > 0 and cells[-1][0] == cell_type:
            cells[-1][1].append(cell)
            cells_id[-1].append(cell_id)
            if cell_ref is not None:
                cell_refs[-1].append(cell_ref)
        else:
            cells.append((cell_type, [cell]))
            cells_id.append([cell_id])
            if cell_ref is not None:
                cell_refs.append([cell_ref])

    # Find the first significant line after BEGIN BULK.
    while True:
        next_line = f.readline()

        # Blank lines or comments
        if len(next_line) < 4 or next_line.startswith(("$", "//", "#")):
            continue
        else:
            break

    while True:
        # End loop when ENDDATA detected
        if next_line.startswith("ENDDATA"):
            break

        # read line and merge with all continuation lines (starting with `+` or
        # `*` or automatic continuation lines in fixed format)
        chunks = []
        c, _ = _chunk_line(next_line)
        chunks.append(c)
        while True:
            next_line = f.readline()
            if not next_line:
                raise ReadError("Premature EOF")
            # Blank lines or comments
            if len(next_line) < 4 or next_line.startswith(("$", "//", "#")):
                continue
            elif next_line[0] in ["+", "*"]:
                # From the NX Nastran documentation:
                #
                # You can manually specify a continuation by using a
                # continuation identifier. A continuation identifier is a
                # special character (+ or *) that indicates that the data
                # continues on another line.
                assert len(chunks[-1]) <= 10
                if len(chunks[-1]) == 10:
                    # This is a continuation line, so the 10th chunk of the
                    # previous line must also be a continuation indicator.
                    # Sometimes its first character is a `+`, but it's not
                    # always present. Anyway, cut it off.
                    chunks[-1][-1] = None
                c, _ = _chunk_line(next_line)
                c[0] = None
                chunks.append(c)
            elif len(chunks[-1]) == 10 and chunks[-1][-1] == "        ":
                # automatic continuation: last chunk of previous line and first
                # chunk of current line are spaces
                c, _ = _chunk_line(next_line)
                if c[0] == "        ":
                    chunks[-1][9] = None
                    c[0] = None
                    chunks.append(c)
                else:
                    # not a continuation
                    break
            else:
                break

        # merge chunks according to large field format
        # large field format: 8 + 16 + 16 + 16 + 16 + 8
        if chunks[0][0].startswith("GRID*"):
            new_chunks = []
            for c in chunks:
                d = [c[0]]
                if len(c) > 1:
                    d.append(c[1])
                if len(c) > 2:
                    d[-1] += c[2]
                if len(c) > 3:
                    d.append(c[3])
                if len(c) > 4:
                    d[-1] += c[4]
                if len(c) > 5:
                    d.append(c[5])
                if len(c) > 6:
                    d[-1] += c[6]
                if len(c) > 7:
                    d.append(c[7])
                if len(c) > 8:
                    d[-1] += c[8]
                if len(c) > 9:
                    d.append(c[9])
                new_chunks.append(d)
            chunks = new_chunks

        # flatten
        chunks = [item for sublist in chunks for item in sublist]

        # remove None (continuation blocks)
        chunks = [chunk for chunk in chunks if chunk is not None]

        # strip chunks
        chunks = [chunk.strip() for chunk in chunks]

        keyword = chunks[0]

        # Points
        if keyword in ["GRID", "GRID*"]:
            point_id = int(chunks[1])
            pref = chunks[2].strip()
            if len(pref) > 0:
                point_refs.append(int(pref))
            points_id.append(point_id)
            points.append([_nastran_string_to_float(i) for i in chunks[3:6]])
        # CellBlock
        elif keyword in nastran_to_meshio_type:
            cell_id = int(chunks[1])
            cell_ref = chunks[2].strip()
            cell_ref = int(cell_ref) if len(cell_ref) > 0 else None

            if keyword in ["CBAR", "CBEAM", "CBUSH", "CBUSH1D", "CGAP"]:
                # Most Nastran 1D elements contain a third node (in the form
                # of a node id or coordinates) to specify the local coordinate
                # system. For example, a CBAR line can be
                # ```
                # CBAR 37 3 11.0 0.0 0.0
                # ```
                # where the last three floats specify the orientation vector.
                # This information is removed.
                cell = chunks[3:5]
            else:
                cell = chunks[3:]

            # remove empty chunks
            cell = [item for item in cell if item != ""]
            # NOTE(review): `cell` is always a list here, never None.
            if cell is not None:
                add_cell(keyword, cell, cell_ref)

    # Convert to numpy arrays
    points = np.array(points)
    points_id = np.array(points_id, dtype=int)
    for k, (c, cid) in enumerate(zip(cells, cells_id)):
        cells[k] = CellBlock(c[0], np.array(c[1], dtype=int))
        cells_id[k] = np.array(cid, dtype=int)

    # Convert to natural point ordering
    # https://stackoverflow.com/questions/16992713/translate-every-element-in-numpy-array-according-to-key
    points_id_dict = dict(zip(points_id, np.arange(len(points), dtype=int)))
    points_id_get = np.vectorize(points_id_dict.__getitem__)
    for k, c in enumerate(cells):
        cells[k] = CellBlock(c.type, points_id_get(c.data))

    # Construct the mesh object
    mesh = Mesh(points, cells)
    mesh.points_id = points_id
    mesh.cells_id = cells_id
    if len(point_refs) > 0:
        mesh.point_data["nastran:ref"] = np.array(point_refs)
    if len(cell_refs) > 0:
        mesh.cell_data["nastran:ref"] = [np.array(i) for i in cell_refs]
    return mesh
# There are two basic categories of input data formats in NX Nastran:
#
# - "Free" format data, in which the data fields are simply separated by
# commas. This type of data is known as free field data.
#
# - "Fixed" format data, in which your data must be aligned in columns of
# specific width. There are two subcategories of fixed format data that differ
# based on the size of the fixed column width:
#
# - Small field format, in which a single line of data is divided into 10
# fields that can contain eight characters each.
#
# - Large field format, in which a single line of input is expanded into
# two lines. The first and last fields on each line are eight columns wide,
# while the intermediate fields are sixteen columns wide. The large field
# format is useful when you need greater numerical accuracy.
#
# See: https://docs.plm.automation.siemens.com/data_services/resources/nxnastran/10/help/en_US/tdocExt/pdf/User.pdf
def write(filename, mesh, point_format="fixed-large", cell_format="fixed-small"):
    """Write `mesh` as a Nastran bulk data file.

    `point_format` and `cell_format` each select one of "free"
    (comma-separated), "fixed-small" (8-column fields), or "fixed-large"
    (8+16-column fields).

    Raises
    ------
    RuntimeError
        If an unknown point or cell format is requested.
    """
    if point_format == "free":
        grid_fmt = "GRID,{:d},{:s},{:s},{:s},{:s}\n"
        float_fmt = _float_to_nastran_string
    elif point_format == "fixed-small":
        # the keyword field is 8 columns wide in fixed format
        grid_fmt = "GRID    {:<8d}{:<8s}{:>8s}{:>8s}{:>8s}\n"
        float_fmt = _float_rstrip
    elif point_format == "fixed-large":
        grid_fmt = "GRID*   {:<16d}{:<16s}{:>16s}{:>16s}\n*       {:>16s}\n"
        float_fmt = _float_to_nastran_string
    else:
        # bug fix: the message previously interpolated the *builtin* `format`
        raise RuntimeError(f'unknown "{point_format}" point format')

    if cell_format == "free":
        int_fmt, cell_info_fmt = "{:d}", "{:s},{:d},{:s},"
        sjoin = ","
    elif cell_format == "fixed-small":
        int_fmt, cell_info_fmt = "{:<8d}", "{:<8s}{:<8d}{:<8s}"
        sjoin, cchar = "", "+"
        nipl1, nipl2 = 6, 14
    elif cell_format == "fixed-large":
        int_fmt, cell_info_fmt = "{:<16d}", "{:<8s}{:<16d}{:<16s}"
        sjoin, cchar = "", "*"
        nipl1, nipl2 = 2, 6
    else:
        # bug fix: the message previously interpolated the *builtin* `format`
        raise RuntimeError(f'unknown "{cell_format}" cell format')

    if mesh.points.shape[1] == 2:
        warn(
            "Nastran requires 3D points, but 2D points given. "
            "Appending 0 third component."
        )
        points = np.column_stack([mesh.points, np.zeros_like(mesh.points[:, 0])])
    else:
        points = mesh.points

    with open_file(filename, "w") as f:
        f.write(f"$ Nastran file written by meshio v{__version__}\n")
        f.write("BEGIN BULK\n")

        # Points
        point_refs = mesh.point_data.get("nastran:ref", None)
        for point_id, x in enumerate(points):
            fx = [float_fmt(k) for k in x]
            pref = str(point_refs[point_id]) if point_refs is not None else ""
            string = grid_fmt.format(point_id + 1, pref, fx[0], fx[1], fx[2])
            f.write(string)

        # CellBlock
        cell_id = 0
        cell_refs = mesh.cell_data.get("nastran:ref", None)
        for ict, cell_block in enumerate(mesh.cells):
            cell_type = cell_block.type
            cells = cell_block.data
            nastran_type = meshio_to_nastran_type[cell_type]
            if cell_format.endswith("-large"):
                # NOTE(review): the "*" suffix makes `nastran_type` miss the
                # CHEXA_/CPENTA_ cases in _convert_to_nastran_ordering below —
                # confirm whether second-order large-format cells need
                # reordering.
                nastran_type += "*"
            if cell_refs is not None:
                cell_refs_t = cell_refs[ict]
            else:
                cell_ref = ""

            for ic, cell in enumerate(cells):
                if cell_refs is not None:
                    cell_ref = str(int(cell_refs_t[ic]))
                cell_id += 1
                cell_info = cell_info_fmt.format(nastran_type, cell_id, cell_ref)
                # 1-based node ids in Nastran node ordering
                cell1 = cell + 1
                cell1 = _convert_to_nastran_ordering(cell1, nastran_type)
                conn = sjoin.join(int_fmt.format(nid) for nid in cell1[:nipl1])
                if len(cell1) > nipl1:
                    if cell_format == "free":
                        cflag1 = cflag3 = ""
                        cflag2 = cflag4 = "+,"
                    else:
                        # continuation markers; the id is written in hex to
                        # fit the 6-character field
                        cflag1 = cflag2 = f"{cchar}1{cell_id:<6x}"
                        cflag3 = cflag4 = f"{cchar}2{cell_id:<6x}"
                    f.write(cell_info + conn + cflag1 + "\n")
                    conn = sjoin.join(int_fmt.format(nid) for nid in cell1[nipl1:nipl2])
                    if len(cell1) > nipl2:
                        f.write(cflag2 + conn + cflag3 + "\n")
                        conn = sjoin.join(int_fmt.format(nid) for nid in cell1[nipl2:])
                        f.write(cflag4 + conn + "\n")
                    else:
                        f.write(cflag2 + conn + "\n")
                else:
                    f.write(cell_info + conn + "\n")

        f.write("ENDDATA\n")
def _float_rstrip(x, n=8):
return f"{x:f}".rstrip("0")[:n]
def _float_to_nastran_string(value, length=16):
"""
From
:
Real numbers, including zero, must contain a decimal point. You can enter
real numbers in a variety of formats. For example, the following are all
acceptable versions of the real number, seven:
```
7.0 .7E1 0.7+1
.70+1 7.E+0 70.-1
```
This methods converts a float value into the corresponding string. Choose
the variant with `E` to make the file less ambigious when edited by a
human. (`5.-1` looks like 4.0, not 5.0e-1 = 0.5.)
Examples:
1234.56789 --> "1.23456789E+3"
-0.1234 --> "-1.234E-1"
3.1415926535897932 --> "3.14159265359E+0"
"""
out = np.format_float_scientific(value, exp_digits=1, precision=11).replace(
"e", "E"
)
assert len(out) <= 16
return out
# The following is the manual float conversion. Keep it around for a while in case
# we still need it.
# aux = length - 2
# # sfmt = "{" + f":{length}s" + "}"
# sfmt = "{" + ":s" + "}"
# pv_fmt = "{" + f":{length}.{aux}e" + "}"
# if value == 0.0:
# return sfmt.format("0.")
# python_value = pv_fmt.format(value) # -1.e-2
# svalue, sexponent = python_value.strip().split("e")
# exponent = int(sexponent) # removes 0s
# sign = "-" if abs(value) < 1.0 else "+"
# # the exponent will be added later...
# sexp2 = str(exponent).strip("-+")
# value2 = float(svalue)
# # the plus 1 is for the sign
# len_sexp = len(sexp2) + 1
# leftover = length - len_sexp
# leftover = leftover - 3 if value < 0 else leftover - 2
# fmt = "{" + f":1.{leftover:d}f" + "}"
# svalue3 = fmt.format(value2)
# svalue4 = svalue3.strip("0")
# field = sfmt.format(svalue4 + sign + sexp2)
# return field
def _nastran_string_to_float(string):
try:
return float(string)
except ValueError:
string = string.strip()
return float(string[0] + string[1:].replace("+", "e+").replace("-", "e-"))
def _chunk_line(line: str) -> tuple[list[str], bool]:
# remove terminal newline
assert line[-1] == "\n"
line = line[:-1]
if "," in line:
# free format
return line.split(","), True
# fixed format
CHUNK_SIZE = 8
chunks = [line[i : CHUNK_SIZE + i] for i in range(0, len(line), CHUNK_SIZE)]
return chunks, False
def _convert_to_vtk_ordering(cell, nastran_type):
if nastran_type in ["CTRAX6", "CTRIAX6"]:
cell = [cell[i] for i in [0, 2, 4, 1, 3, 5]]
elif nastran_type == "CHEXA_":
cell = [
cell[i]
for i in [
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
16,
17,
18,
19,
12,
13,
14,
15,
]
]
elif nastran_type == "CPENTA_":
cell = [cell[i] for i in [0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 13, 14, 9, 10, 11]]
return cell
def _convert_to_nastran_ordering(cell, nastran_type):
if nastran_type in ["CTRAX6", "CTRIAX6"]:
cell = [cell[i] for i in [0, 3, 1, 4, 2, 5]]
elif nastran_type == "CHEXA_":
cell = [
cell[i]
for i in [
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
16,
17,
18,
19,
12,
13,
14,
15,
]
]
elif nastran_type == "CPENTA_":
cell = [cell[i] for i in [0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 13, 14, 9, 10, 11]]
return cell
register_format("nastran", [".bdf", ".fem", ".nas"], read, {"nastran": write})
src/meshio/netgen/ 0000775 0000000 0000000 00000000000 14562440725 0014422 5 ustar 00root root 0000000 0000000 src/meshio/netgen/__init__.py 0000664 0000000 0000000 00000000076 14562440725 0016536 0 ustar 00root root 0000000 0000000 from ._netgen import read, write
__all__ = ["read", "write"]
src/meshio/netgen/_netgen.py 0000664 0000000 0000000 00000031654 14562440725 0016424 0 ustar 00root root 0000000 0000000 """
I/O for Netgen mesh files
.
"""
import numpy as np
from ..__about__ import __version__
from .._common import warn
from .._files import open_file
from .._helpers import register_format
from .._mesh import Mesh
def _fast_forward_over_blank_lines(f):
is_eof = False
while True:
line = f.readline()
if not line:
is_eof = True
break
line = line.strip()
if len(line) > 0 and not line.startswith("#"):
break
return line, is_eof
# Netgen section name -> codimension of the entities named in that section.
netgen_codims = {"materials": 0, "bcnames": 1, "cd2names": 2, "cd3names": 3}
# Per entity dimension: node count -> meshio cell type.
netgen0d_to_meshio_type = {
    1: "vertex",
}
netgen1d_to_meshio_type = {
    2: "line",
}
netgen2d_to_meshio_type = {
    3: "triangle",
    6: "triangle6",
    4: "quad",
    8: "quad8",
}
netgen3d_to_meshio_type = {
    4: "tetra",
    5: "pyramid",
    6: "wedge",
    8: "hexahedron",
    10: "tetra10",
    13: "pyramid13",
    15: "wedge15",
    20: "hexahedron20",
}
# Dispatch table keyed by entity dimension.
netgen_to_meshio_type = {
    0: netgen0d_to_meshio_type,
    1: netgen1d_to_meshio_type,
    2: netgen2d_to_meshio_type,
    3: netgen3d_to_meshio_type,
}
# Node permutations: Netgen node order -> meshio node order, per cell type.
netgen_to_meshio_pmap = {
    "vertex": [0],
    "line": [0, 1],
    "triangle": list(range(3)),
    "triangle6": [0, 1, 2, 5, 3, 4],
    "quad": list(range(4)),
    "quad8": [0, 1, 2, 3, 4, 7, 5, 6],
    "tetra": [0, 2, 1, 3],
    "tetra10": [0, 2, 1, 3, 5, 7, 4, 6, 9, 8],
    "pyramid": [0, 3, 2, 1, 4],
    "pyramid13": [0, 3, 2, 1, 4, 7, 6, 8, 5, 9, 12, 11, 10],
    "wedge": [0, 2, 1, 3, 5, 4],
    "wedge15": [0, 2, 1, 3, 5, 4, 7, 8, 6, 13, 14, 12, 9, 11, 10],
    "hexahedron": [0, 3, 2, 1, 4, 7, 6, 5],
    "hexahedron20": [
        0, 3, 2, 1, 4, 7, 6, 5, 10, 9,
        11, 8, 16, 19, 18, 17, 14, 13, 15, 12,
    ],
}

# The inverse permutations: meshio node order -> Netgen node order.
meshio_to_netgen_pmap = {}
for _cell_type, _perm in netgen_to_meshio_pmap.items():
    _inverse = [0] * len(_perm)
    for _pos, _target in enumerate(_perm):
        _inverse[_target] = _pos
    meshio_to_netgen_pmap[_cell_type] = _inverse
def read(filename):
    """Read a Netgen .vol file; `.vol.gz` files are transparently unzipped."""
    if str(filename).endswith(".vol.gz"):
        import gzip

        opener = gzip.open(filename, "rt")
    else:
        opener = open_file(filename, "r")
    with opener as f:
        return read_buffer(f)
def _read_cells(f, netgen_cell_type, cells, cells_index, skip_every_other_line=False):
    """Read one Netgen cell section, appending to `cells`/`cells_index`.

    Each section type has its own column layout: `pi0` is the column of the
    first point index, `i_index` the column holding the domain/boundary
    index, and `nump` the number of points per cell (fixed for 0D/1D, read
    per-line for 2D/3D).  `skip_every_other_line` handles the variant of
    "edgesegmentsgi2" that stores two lines per entry.
    """
    if netgen_cell_type == "pointelements":
        dim = 0
        nump = 1
        pi0 = 0
        i_index = 1
    elif netgen_cell_type.startswith("edgesegments"):
        dim = 1
        nump = 2
        pi0 = 2
        i_index = 0
    elif netgen_cell_type.startswith("surfaceelements"):
        dim = 2
        # nump is read from column 4 of each line
        pi0 = 5
        i_index = 1
    elif netgen_cell_type == "volumeelements":
        dim = 3
        # nump is read from column 1 of each line
        pi0 = 2
        i_index = 0
    else:
        raise ValueError(f"Unknown Netgen cell section '{netgen_cell_type}'")

    num_cells = int(f.readline())
    tmap = netgen_to_meshio_type[dim]
    for _ in range(num_cells):
        line, _ = _fast_forward_over_blank_lines(f)
        data = list(filter(None, line.split(" ")))
        index = int(data[i_index])
        if dim == 2:
            nump = int(data[4])
        elif dim == 3:
            nump = int(data[1])
        pi = list(map(int, data[pi0 : pi0 + nump]))
        t = tmap[nump]
        # start a new cell block whenever the cell type changes
        if len(cells) == 0 or t != cells[-1][0]:
            cells.append((t, []))
            cells_index.append([])
        cells[-1][1].append(pi)
        cells_index[-1].append(index)
        if skip_every_other_line:
            line, _ = _fast_forward_over_blank_lines(f)
def _write_cells(f, cell_block, index=None):
    """Write one cell block in the column layout of its Netgen section.

    `pre_data`/`post_data` are the fixed columns surrounding the 1-based
    point indices; `i_index` is the column that receives the per-cell
    `index` values when given.
    """
    if len(cell_block) == 0:
        return
    pmap = np.array(meshio_to_netgen_pmap[cell_block.type])

    post_data = []
    pre_data = []
    i_index = 0
    if cell_block.dim == 0:
        post_data = [1]
        i_index = 1
    elif cell_block.dim == 1:
        pre_data = [1, 0]
        post_data = [-1, -1, 0, 0, 1, 0, 1, 0]
    elif cell_block.dim == 2:
        pre_data = [1, 1, 0, 0, len(pmap)]
        i_index = 1
    elif cell_block.dim == 3:
        pre_data = [1, len(pmap)]
    else:
        raise ValueError(f"Invalid cell dimension: {cell_block.dim}")

    col1 = len(pre_data)
    col2 = col1 + len(pmap)
    col3 = col2 + len(post_data)

    pi = np.zeros((len(cell_block), col3), dtype=np.int32)
    pi[:, :col1] = np.repeat([pre_data], len(cell_block), axis=0)
    # convert to Netgen node ordering and 1-based point indices
    pi[:, col1:col2] = cell_block.data[:, pmap] + 1
    pi[:, col2:] = np.repeat([post_data], len(cell_block), axis=0)
    if index is not None:
        pi[:, i_index] = index
    np.savetxt(f, pi, "%i")
def _skip_block(f):
n = int(f.readline())
for _ in range(n):
f.readline()
def _write_codim_domain_data(f, mesh, cells_index, dim, codim):
    """Write the name table (materials/bcnames/cd2names/cd3names) for `codim`.

    Names come from mesh.field_data entries whose dimension equals
    `dim - codim`; if none exist, generic `cd<codim>_<idx>` names are
    generated from the indices present in `cells_index`.  Nothing is
    written if no names result.
    """
    # assume format as read from gmsh 4.1 files
    data = {}
    for name, val in mesh.field_data.items():
        if val[1] == dim - codim:
            data[val[0]] = name

    # set generic default names (is this appropriate/useful?)
    if len(data) == 0:
        indices = set()
        for cell_block, index in zip(mesh.cells, cells_index):
            if index is None:
                continue
            if cell_block.dim == dim - codim:
                indices = indices.union(set(index))
        for idx in indices:
            data[idx] = f"cd{codim:d}_{idx:d}"

    if len(data) == 0:
        return

    codim_tag = [kk for kk, vv in netgen_codims.items() if vv == codim][0]
    f.write(f"\n{codim_tag:s}\n")
    # entries run from 1 to the largest named index; gaps get empty names
    ncd = max(data.keys())
    f.write(f"{ncd:d}\n")
    for idx in range(1, ncd + 1):
        f.write("{:d} {:s}\n".format(idx, data.get(idx, "")))
def read_buffer(f):
    """Parse an open Netgen .vol file into a meshio Mesh.

    Recognized sections: dimension, geomtype, points, the cell sections
    handled by `_read_cells`, the codimension name tables (materials,
    bcnames, cd2names, cd3names), identifications/identificationtypes
    (stored in mesh.info), and a few sections that are skipped outright.
    Raises RuntimeError on a missing "mesh3d" header or an unknown token.
    """
    points = []
    cells = []
    cells_index = []
    field_data = {}
    identifications = None
    identificationtypes = None

    have_edgesegmentsgi2_in_two_lines = False
    line, is_eof = _fast_forward_over_blank_lines(f)
    if line != "mesh3d":
        raise RuntimeError("Not a valid Netgen mesh")

    while True:
        line, is_eof = _fast_forward_over_blank_lines(f)
        if is_eof:
            break
        elif line == "dimension":
            # NOTE(review): later sections assume `dimension` was already
            # read; a file listing "points" first would raise NameError.
            dimension = int(f.readline())
        elif line == "geomtype":
            geomtype = int(f.readline())
            if geomtype not in [0, 1, 10, 11, 12, 13]:
                warn(f"Unknown geomtype in Netgen mesh: {geomtype}")
        elif line == "points":
            num_points = int(f.readline())
            if num_points > 0:
                points = np.loadtxt(f, max_rows=num_points)
                if dimension != 3:
                    # Netgen always stores 3 coordinates; drop the padding
                    points = points[:, :dimension]
        elif line in [
            "pointelements",
            "edgesegments",
            "edgesegmentsgi",
            "edgesegmentsgi2",
            "surfaceelements",
            "surfaceelementsgi",
            "surfaceelementsuv",
            "volumeelements",
        ]:
            _read_cells(f, line, cells, cells_index, have_edgesegmentsgi2_in_two_lines)
        elif line == "endmesh":
            break
        elif line.split() == ["surf1", "surf2", "p1", "p2"]:
            # if this line is present, the edgesegmentsgi2 info is split in two lines per data set
            have_edgesegmentsgi2_in_two_lines = True
        elif line in netgen_codims.keys():
            edim = dimension - netgen_codims[line]
            num_entries = int(f.readline())
            for _ in range(num_entries):
                line = f.readline().split()
                # entries without exactly "<idx> <name>" are ignored
                if len(line) != 2:
                    continue
                idx, name = line
                field_data[name] = [int(idx), edim]
        elif line == "identifications":
            num_entries = int(f.readline())
            if num_entries > 0:
                identifications = np.loadtxt(
                    f, max_rows=num_entries, dtype=int
                ).reshape(num_entries, 3)
        elif line == "identificationtypes":
            num_entries = int(f.readline())
            if num_entries > 0:
                identificationtypes = np.loadtxt(f, max_rows=1, dtype=int).reshape(
                    1, num_entries
                )
        elif line in [
            "face_colours",
            "singular_edge_left",
            "singular_edge_right",
            "singular_face_inside",
            "singular_face_outside",
            "singular_points",
        ]:
            _skip_block(f)
        else:
            raise RuntimeError(f"Unknown Netgen mesh token: {line}")

    # convert to numpy arrays
    # subtract one (netgen counts 1-based)
    # apply permutation of vertex numbers
    for k, (t, data) in enumerate(cells):
        pmap = netgen_to_meshio_pmap[t]
        d = np.array(data, dtype=np.uint32)
        d[:, :] = d[:, pmap] - 1
        cells[k] = (t, d)

    # currently, there is no better place for identification data
    kwargs = {}
    if identifications is not None:
        kwargs["info"] = {
            "netgen:identifications": identifications,
            "netgen:identificationtypes": identificationtypes,
        }

    mesh = Mesh(
        points,
        cells,
        cell_data={"netgen:index": cells_index},
        field_data=field_data,
        **kwargs,
    )
    return mesh
def write(filename, mesh, float_fmt=".16e"):
    """Write `mesh` in Netgen .vol format (gzipped when the name ends in .vol.gz)."""
    if not str(filename).endswith(".vol.gz"):
        with open_file(filename, "w") as f:
            write_buffer(f, mesh, float_fmt)
        return
    import gzip

    with gzip.open(filename, "wt") as f:
        write_buffer(f, mesh, float_fmt)
def write_buffer(f, mesh, float_fmt):
    """Write `mesh` to an open text stream in Netgen .vol format.

    Sections are written in the order: surfaceelements, volumeelements,
    edgesegmentsgi2, points, pointelements, identification data (from
    mesh.info), then the codimension name tables.
    """
    _, dimension = mesh.points.shape
    cells_per_dim = [0, 0, 0, 0]

    # Netgen can store one cell_index, i.e., integer cell data. Pick one in
    # mesh.cell_data, and prefer "netgen:index" if present. Unfortunately, netgen cannot
    # store the name of the data; when reading, it will always be "netgen:index".
    if "netgen:index" in mesh.cell_data:
        cells_index = mesh.cell_data["netgen:index"]
    else:
        # any other integer cell data?
        cells_index = None
        for values in mesh.cell_data.values():
            if np.issubdtype(values[0].dtype, np.integer):
                cells_index = values
                break
        if cells_index is None:
            cells_index = [None] * len(mesh.cells)

    for cell_block in mesh.cells:
        cells_per_dim[cell_block.dim] += len(cell_block)

    f.write(f"# Generated by meshio {__version__}\n")
    f.write("mesh3d\n\n")
    f.write("dimension\n")
    f.write(f"{dimension}\n\n")
    f.write("geomtype\n")
    f.write("0\n")

    f.write("\n# surfnr bcnr domin domout np p1 p2 p3\n")
    f.write("surfaceelements\n")
    f.write(f"{cells_per_dim[2]}\n")
    for cell_block, index in zip(mesh.cells, cells_index):
        if cell_block.dim == 2:
            _write_cells(f, cell_block, index)

    f.write("\n# matnr np p1 p2 p3 p4\n")
    f.write("volumeelements\n")
    f.write(f"{cells_per_dim[3]}\n")
    for cell_block, index in zip(mesh.cells, cells_index):
        if cell_block.dim == 3:
            _write_cells(f, cell_block, index)

    f.write(
        "\n# surfid 0 p1 p2 trignum1 trignum2 domin/surfnr1 domout/surfnr2 ednr1 dist1 ednr2 dist2\n",
    )
    f.write("edgesegmentsgi2\n")
    f.write(f"{cells_per_dim[1]}\n")
    for cell_block, index in zip(mesh.cells, cells_index):
        if cell_block.dim == 1:
            _write_cells(f, cell_block, index)

    f.write("\n# X Y Z\n")
    f.write("points\n")
    f.write(f"{len(mesh.points)}\n")
    points = mesh.points
    if dimension != 3:
        # Netgen always stores 3 coordinates; pad with zeros
        points = np.hstack(
            (points, np.zeros((points.shape[0], 3 - dimension), dtype=points.dtype))
        )
    np.savetxt(f, points, "%" + float_fmt)

    f.write("\n# pnum index\n")
    f.write("pointelements\n")
    f.write(f"{cells_per_dim[0]}\n")
    for cell_block, index in zip(mesh.cells, cells_index):
        if cell_block.dim == 0:
            _write_cells(f, cell_block, index)

    # currently, there is no better place for identification data
    if isinstance(mesh.info, dict):
        identifications = mesh.info.get("netgen:identifications")
        identificationtypes = mesh.info.get("netgen:identificationtypes")
        if identifications is not None and identificationtypes is not None:
            f.write("\nidentifications\n")
            f.write(f"{identifications.shape[0]}\n")
            np.savetxt(f, identifications, "%d")
            f.write("\nidentificationtypes\n")
            f.write(f"{identificationtypes.size}\n")
            np.savetxt(
                f, identificationtypes.reshape(1, identificationtypes.size), "%d"
            )

    for codim in range(dimension + 1):
        _write_codim_domain_data(f, mesh, cells_index, dimension, codim)

    f.write("\nendmesh\n")
register_format("netgen", [".vol", ".vol.gz"], read, {"netgen": write})
src/meshio/neuroglancer/ 0000775 0000000 0000000 00000000000 14562440725 0015626 5 ustar 00root root 0000000 0000000 src/meshio/neuroglancer/__init__.py 0000664 0000000 0000000 00000000104 14562440725 0017732 0 ustar 00root root 0000000 0000000 from ._neuroglancer import read, write
__all__ = ["read", "write"]
src/meshio/neuroglancer/_neuroglancer.py 0000664 0000000 0000000 00000005437 14562440725 0021034 0 ustar 00root root 0000000 0000000 """
Neuroglancer format, used in large-scale neuropil segmentation data.
Adapted from https://github.com/HumanBrainProject/neuroglancer-scripts/blob/1fcabb613a715ba17c65d52596dec3d687ca3318/src/neuroglancer_scripts/mesh.py (MIT license)
"""
import struct
import numpy as np
from .._common import warn
from .._exceptions import ReadError
from .._files import open_file
from .._helpers import register_format
from .._mesh import CellBlock, Mesh
def write(filename, mesh):
    """Write `mesh` in Neuroglancer pre-computed binary format."""
    with open_file(filename, "wb") as stream:
        write_buffer(stream, mesh)
def write_buffer(f, mesh):
"""Store a mesh in Neuroglancer pre-computed format.
:param file: a file-like object opened in binary mode (its ``write`` method
will be called with :class:`bytes` objects).
:param meshio.Mesh mesh: Mesh object to write
"""
vertices = np.asarray(mesh.points, " num_vertices):
raise ReadError("The mesh references nonexistent vertices")
return Mesh(vertices, [CellBlock("triangle", triangles)])
register_format("neuroglancer", [], read, {"neuroglancer": write})
src/meshio/obj/ 0000775 0000000 0000000 00000000000 14562440725 0013714 5 ustar 00root root 0000000 0000000 src/meshio/obj/__init__.py 0000664 0000000 0000000 00000000073 14562440725 0016025 0 ustar 00root root 0000000 0000000 from ._obj import read, write
__all__ = ["read", "write"]
src/meshio/obj/_obj.py 0000664 0000000 0000000 00000010017 14562440725 0015176 0 ustar 00root root 0000000 0000000 """
I/O for the Wavefront .obj file format, cf.
.
"""
import datetime
import numpy as np
from ..__about__ import __version__
from .._exceptions import WriteError
from .._files import open_file
from .._helpers import register_format
from .._mesh import CellBlock, Mesh
def read(filename):
    """Read a Wavefront .obj file into a meshio Mesh."""
    with open_file(filename, "r") as f:
        return read_buffer(f)
def read_buffer(f):
    """Read Wavefront OBJ data from an open text stream.

    Collects vertices (`v`), vertex normals (`vn`, stored as point_data
    "obj:vn"), texture coordinates (`vt`, "obj:vt"), and faces (`f`).
    Faces are gathered into cell blocks of uniform vertex count; `g` lines
    start a new group whose running id is recorded in cell_data
    "obj:group_ids".
    """
    points = []
    vertex_normals = []
    texture_coords = []
    face_groups = []
    face_group_ids = []
    face_group_id = -1
    while True:
        line = f.readline()
        if not line:
            # EOF
            break

        strip = line.strip()
        if len(strip) == 0 or strip[0] == "#":
            continue

        split = strip.split()
        if split[0] == "v":
            points.append([float(item) for item in split[1:]])
        elif split[0] == "vn":
            vertex_normals.append([float(item) for item in split[1:]])
        elif split[0] == "vt":
            texture_coords.append([float(item) for item in split[1:]])
        elif split[0] == "s":
            # "s 1" or "s off" controls smooth shading
            pass
        elif split[0] == "f":
            # only the vertex index (before the first "/") is kept;
            # texture/normal indices on the face line are discarded
            dat = [int(item.split("/")[0]) for item in split[1:]]
            # start a new block when the vertex count changes
            if len(face_groups) == 0 or (
                len(face_groups[-1]) > 0 and len(face_groups[-1][-1]) != len(dat)
            ):
                face_groups.append([])
                face_group_ids.append([])
            face_groups[-1].append(dat)
            face_group_ids[-1].append(face_group_id)
        elif split[0] == "g":
            # new group
            face_groups.append([])
            face_group_ids.append([])
            face_group_id += 1
        else:
            # who knows
            pass

    # There may be empty groups, too.
    # Remove them.
    face_groups = [f for f in face_groups if len(f) > 0]
    face_group_ids = [g for g in face_group_ids if len(g) > 0]

    points = np.array(points)
    texture_coords = np.array(texture_coords)
    vertex_normals = np.array(vertex_normals)
    point_data = {}
    if len(texture_coords) > 0:
        point_data["obj:vt"] = texture_coords
    if len(vertex_normals) > 0:
        point_data["obj:vn"] = vertex_normals

    # convert to numpy arrays; OBJ indices are 1-based, meshio's 0-based
    face_groups = [np.array(f) for f in face_groups]
    cell_data = {"obj:group_ids": []}
    cells = []
    for f, gid in zip(face_groups, face_group_ids):
        if f.shape[1] == 3:
            cells.append(CellBlock("triangle", f - 1))
        elif f.shape[1] == 4:
            cells.append(CellBlock("quad", f - 1))
        else:
            cells.append(CellBlock("polygon", f - 1))
        cell_data["obj:group_ids"].append(gid)

    return Mesh(points, cells, point_data=point_data, cell_data=cell_data)
def write(filename, mesh):
    """Write `mesh` to a Wavefront .obj file.

    Writes vertices, optional vertex normals ("obj:vn") and texture
    coordinates ("obj:vt") from point_data, and one `f` record per cell.

    Raises
    ------
    WriteError
        For cell types other than triangle, quad, or polygon.
    """
    for c in mesh.cells:
        if c.type not in ["triangle", "quad", "polygon"]:
            # bug fix: the message previously omitted "polygon" although the
            # check accepts it
            raise WriteError(
                "Wavefront .obj files can only contain triangle, quad, or "
                f"polygon cells. Got {c.type}."
            )

    with open_file(filename, "w") as f:
        f.write(
            "# Created by meshio v{}, {}\n".format(
                __version__, datetime.datetime.now().isoformat()
            )
        )
        for p in mesh.points:
            f.write(f"v {p[0]} {p[1]} {p[2]}\n")

        if "obj:vn" in mesh.point_data:
            dat = mesh.point_data["obj:vn"]
            fmt = "vn " + " ".join(["{}"] * dat.shape[1]) + "\n"
            for vn in dat:
                f.write(fmt.format(*vn))

        if "obj:vt" in mesh.point_data:
            dat = mesh.point_data["obj:vt"]
            fmt = "vt " + " ".join(["{}"] * dat.shape[1]) + "\n"
            for vt in dat:
                f.write(fmt.format(*vt))

        for cell_block in mesh.cells:
            # OBJ indices are 1-based
            fmt = "f " + " ".join(["{}"] * cell_block.data.shape[1]) + "\n"
            for c in cell_block.data:
                f.write(fmt.format(*(c + 1)))
register_format("obj", [".obj"], read, {"obj": write})
src/meshio/off/ 0000775 0000000 0000000 00000000000 14562440725 0013714 5 ustar 00root root 0000000 0000000 src/meshio/off/__init__.py 0000664 0000000 0000000 00000000073 14562440725 0016025 0 ustar 00root root 0000000 0000000 from ._off import read, write
__all__ = ["read", "write"]
src/meshio/off/_off.py 0000664 0000000 0000000 00000005433 14562440725 0015204 0 ustar 00root root 0000000 0000000 """
I/O for the OFF surface format, cf.
,
.
"""
import numpy as np
from .._common import warn
from .._exceptions import ReadError
from .._files import open_file
from .._helpers import register_format
from .._mesh import CellBlock, Mesh
def read(filename):
    """Read an OFF file and return a meshio Mesh."""
    with open_file(filename) as f:
        return Mesh(*read_buffer(f))
def read_buffer(f):
    """Read OFF data from an open *text* stream.

    Returns `(points, cells)`; only triangular faces are supported.
    """
    # assert that the first line reads `OFF`
    line = f.readline()
    if isinstance(line, (bytes, bytearray)):
        raise ReadError("Expected text buffer, not bytes.")
    if line.strip() != "OFF":
        raise ReadError("Expected the first line to be `OFF`.")

    # fast forward to the next significant line
    while True:
        line = f.readline().strip()
        if line and line[0] != "#":
            break

    # This line contains the counts: <num_verts> <num_faces> <num_edges>
    num_verts, num_faces, _ = line.split(" ")
    num_verts = int(num_verts)
    num_faces = int(num_faces)

    verts = np.fromfile(f, dtype=float, count=3 * num_verts, sep=" ").reshape(
        num_verts, 3
    )

    # each face row: <num_points> <idx0> <idx1> <idx2>
    data = np.fromfile(f, dtype=int, count=4 * num_faces, sep=" ").reshape(num_faces, 4)
    if not np.all(data[:, 0] == 3):
        raise ReadError("Can only read triangular faces")
    cells = [CellBlock("triangle", data[:, 1:])]

    return verts, cells
def write(filename, mesh):
    """Write the triangle cells of `mesh` as an OFF file.

    2D points are padded with a zero third component; non-triangle cell
    blocks are skipped with a warning.
    """
    if mesh.points.shape[1] == 2:
        warn(
            "OFF requires 3D points, but 2D points given. "
            "Appending 0 as third component."
        )
        points = np.column_stack([mesh.points, np.zeros_like(mesh.points[:, 0])])
    else:
        points = mesh.points

    skip = [c for c in mesh.cells if c.type != "triangle"]
    if skip:
        string = ", ".join(item.type for item in skip)
        warn(f"OFF only supports triangle cells. Skipping {string}.")

    tri = mesh.get_cells_type("triangle")

    with open(filename, "wb") as fh:
        fh.write(b"OFF\n")
        fh.write(b"# Created by meshio\n\n")

        # counts: <num verts> <num faces> <num edges (unused, 0)>
        c = f"{mesh.points.shape[0]} {tri.shape[0]} {0}\n\n"
        fh.write(c.encode())

        # vertices
        # np.savetxt(fh, mesh.points, "%r")  # slower
        fmt = " ".join(["{}"] * points.shape[1])
        out = "\n".join([fmt.format(*row) for row in points]) + "\n"
        fh.write(out.encode())

        # triangles, each row prefixed with its node count (3)
        out = np.column_stack([np.full(tri.shape[0], 3, dtype=tri.dtype), tri])
        # savetxt is slower
        # np.savetxt(fh, out, "%d %d %d %d")
        fmt = " ".join(["{}"] * out.shape[1])
        out = "\n".join([fmt.format(*row) for row in out]) + "\n"
        fh.write(out.encode())
register_format("off", [".off"], read, {"off": write})
src/meshio/permas/ 0000775 0000000 0000000 00000000000 14562440725 0014431 5 ustar 00root root 0000000 0000000 src/meshio/permas/__init__.py 0000664 0000000 0000000 00000000076 14562440725 0016545 0 ustar 00root root 0000000 0000000 from ._permas import read, write
__all__ = ["read", "write"]
src/meshio/permas/_permas.py 0000664 0000000 0000000 00000021136 14562440725 0016434 0 ustar 00root root 0000000 0000000 """
I/O for PERMAS dat files.
"""
import numpy as np
from ..__about__ import __version__
from .._common import warn
from .._exceptions import ReadError
from .._files import open_file
from .._helpers import register_format
from .._mesh import CellBlock, Mesh
# Map from PERMAS element keyword to meshio cell type.
permas_to_meshio_type = {
    "PLOT1": "vertex",
    "PLOTL2": "line",
    "FLA2": "line",
    "FLA3": "line3",
    "PLOTL3": "line3",
    "BECOS": "line",
    "BECOC": "line",
    "BETAC": "line",
    "BECOP": "line",
    "BETOP": "line",
    "BEAM2": "line",
    "FSCPIPE2": "line",
    "LOADA4": "quad",
    "PLOTA4": "quad",
    "QUAD4": "quad",
    "QUAD4S": "quad",
    "QUAMS4": "quad",
    "SHELL4": "quad",
    "PLOTA8": "quad8",
    "LOADA8": "quad8",
    "QUAMS8": "quad8",
    "PLOTA9": "quad9",
    "LOADA9": "quad9",
    "QUAMS9": "quad9",
    "PLOTA3": "triangle",
    "SHELL3": "triangle",
    "TRIA3": "triangle",
    "TRIA3K": "triangle",
    "TRIA3S": "triangle",
    "TRIMS3": "triangle",
    "LOADA6": "triangle6",
    "TRIMS6": "triangle6",
    "HEXE8": "hexahedron",
    "HEXFO8": "hexahedron",
    "HEXE20": "hexahedron20",
    "HEXE27": "hexahedron27",
    "TET4": "tetra",
    "TET10": "tetra10",
    "PYRA5": "pyramid",
    "PENTA6": "wedge",
    "PENTA15": "wedge15",
}
# Inverse map. Where several PERMAS keywords map to the same meshio type
# (e.g. the many "line" keywords), the last entry above wins.
meshio_to_permas_type = {v: k for k, v in permas_to_meshio_type.items()}
def read(filename):
    """Read a mesh from a PERMAS dat file and return it as a meshio Mesh."""
    with open_file(filename, "r") as f:
        return read_buffer(f)
def read_buffer(f):
    """Parse an open PERMAS dat file into a meshio Mesh.

    Dispatches on keyword lines ("$COOR", "$ELEMENT", "$NSET", "$ESET");
    all other keywords are silently skipped.

    NOTE(review): if the file contains no $COOR block, `points` (and
    `point_gids`) are never assigned and this function raises NameError —
    confirm all valid inputs carry coordinates before elements.
    """
    # Initialize the optional data fields
    cells = []
    nsets = {}
    elsets = {}
    field_data = {}
    cell_data = {}
    point_data = {}
    while True:
        line = f.readline()
        if not line:
            # EOF
            break
        # Comments
        if line.startswith("!"):
            continue
        # strip("$") only removes "$" characters, so `keyword` keeps its
        # trailing newline; the startswith() checks below are unaffected.
        keyword = line.strip("$").upper()
        if keyword.startswith("COOR"):
            points, point_gids = _read_nodes(f)
        elif keyword.startswith("ELEMENT"):
            key, idx = _read_cells(f, keyword, point_gids)
            cells.append(CellBlock(key, idx))
        elif keyword.startswith("NSET"):
            params_map = get_param_map(keyword, required_keys=["NSET"])
            setids = read_set(f, params_map)
            name = params_map["NSET"]
            if name not in nsets:
                nsets[name] = []
            nsets[name].append(setids)
        elif keyword.startswith("ESET"):
            params_map = get_param_map(keyword, required_keys=["ESET"])
            setids = read_set(f, params_map)
            name = params_map["ESET"]
            if name not in elsets:
                elsets[name] = []
            elsets[name].append(setids)
        else:
            # There are just too many PERMAS keywords to explicitly skip them.
            pass
    # NOTE(review): nsets/elsets are collected above but never attached to the
    # returned Mesh — the set information is currently dropped.
    return Mesh(
        points, cells, point_data=point_data, cell_data=cell_data, field_data=field_data
    )
def _read_nodes(f):
points = []
point_gids = {}
index = 0
while True:
last_pos = f.tell()
line = f.readline()
if line.startswith("!"):
break
if line.startswith("$"):
break
entries = line.strip().split(" ")
gid, x = entries[0], entries[1:]
point_gids[int(gid)] = index
points.append([float(xx) for xx in x])
index += 1
f.seek(last_pos)
return np.array(points, dtype=float), point_gids
def _read_cells(f, line0, point_gids):
    """Read one $ELEMENT block.

    Parameters
    ----------
    f :
        Open file object, positioned just after the keyword line.
    line0 : str
        The "ELEMENT TYPE=..." keyword line (already "$"-stripped).
    point_gids : dict
        Node-ID -> 0-based index map produced by ``_read_nodes``.

    Returns the meshio cell type string and an (n, nodes_per_cell) int array
    of 0-based point indices.
    """
    sline = line0.split(" ")[1:]
    etype_sline = sline[0]
    if "TYPE" not in etype_sline:
        raise ReadError(etype_sline)
    etype = etype_sline.split("=")[1].strip()
    if etype not in permas_to_meshio_type:
        raise ReadError(f"Element type not available: {etype}")
    cell_type = permas_to_meshio_type[etype]
    cells, idx = [], []
    while True:
        last_pos = f.tell()
        line = f.readline()
        # stop at the next keyword line or at EOF
        if line.startswith("$") or line == "":
            break
        line = line.strip()
        # the first item is just a running index
        idx += [point_gids[int(k)] for k in filter(None, line.split(" ")[1:])]
        # a trailing "!" appears to mark a continuation line: node ids keep
        # accumulating into the same cell — TODO confirm against PERMAS docs
        if not line.endswith("!"):
            cells.append(idx)
            idx = []
    # rewind so the caller sees the keyword line that ended this block
    f.seek(last_pos)
    return cell_type, np.array(cells)
def get_param_map(word, required_keys=None):
    """Parse the comma-separated ``key`` / ``key=value`` parameters of a
    keyword line into a dict; bare keys map to ``None``.

    Example
    -------
    >>> get_param_map("elset,instance=dummy2,generate", required_keys=["instance"])
    {'elset': None, 'instance': 'dummy2', 'generate': None}

    Raises ReadError on a malformed ``a=b=c`` chunk and RuntimeError when a
    required key is absent.
    """
    required_keys = [] if required_keys is None else required_keys
    param_map = {}
    for chunk in word.split(","):
        if "=" not in chunk:
            param_map[chunk.strip()] = None
            continue
        parts = chunk.split("=")
        if len(parts) != 2:
            raise ReadError(parts)
        param_map[parts[0].strip()] = parts[1].strip()
    msg = "".join(
        f"{key} not found in {word}\n" for key in required_keys if key not in param_map
    )
    if msg:
        raise RuntimeError(msg)
    return param_map
def read_set(f, params_map):
    """Read the integer IDs of a node/element set body.

    Reading stops at the next keyword ("$") line — which is pushed back for
    the caller — or at EOF (previously an EOF here crashed with ``int("")``).

    With a "generate" parameter the three values are interpreted as a
    start/stop/step range; otherwise the IDs are returned sorted and
    de-duplicated as an int32 array.
    """
    set_ids = []
    while True:
        last_pos = f.tell()
        line = f.readline()
        # stop at the next keyword line or at EOF
        if not line or line.startswith("$"):
            break
        # split() (instead of split(" ")) tolerates runs of whitespace
        set_ids += [int(k) for k in line.split()]
    # rewind so the caller sees the terminating line again
    f.seek(last_pos)
    if "generate" in params_map:
        if len(set_ids) != 3:
            raise ReadError(set_ids)
        # NOTE(review): np.arange excludes the stop value — confirm PERMAS
        # "generate" ranges are exclusive of the end id
        set_ids = np.arange(set_ids[0], set_ids[1], set_ids[2])
    else:
        # the former try/except ValueError around this line only re-raised
        set_ids = np.unique(np.array(set_ids, dtype="int32"))
    return set_ids
def write(filename, mesh):
    """Write a mesh as a PERMAS dat file.

    Points are written 1-based under $COOR; each cell block becomes a
    $ELEMENT block. For cell types whose node ordering differs between
    meshio and PERMAS the nodes are permuted via ``permas_node_order``
    (this replaces four previously duplicated elif branches that differed
    only in the permutation list).
    """
    if mesh.points.shape[1] == 2:
        warn(
            "PERMAS requires 3D points, but 2D points given. "
            "Appending 0 third component."
        )
        points = np.column_stack([mesh.points, np.zeros_like(mesh.points[:, 0])])
    else:
        points = mesh.points
    # meshio -> PERMAS node permutations; types not listed keep meshio order
    permas_node_order = {
        "triangle6": [0, 3, 1, 4, 2, 5],
        "tetra10": [0, 4, 1, 5, 2, 6, 7, 8, 9, 3],
        "quad9": [0, 4, 1, 7, 8, 5, 3, 6, 2],
        "wedge15": [0, 6, 1, 7, 2, 8, 9, 10, 11, 3, 12, 4, 13, 5, 14],
    }
    with open_file(filename, "wt") as f:
        f.write("!PERMAS DataFile Version 18.0\n")
        f.write(f"!written by meshio v{__version__}\n")
        f.write("$ENTER COMPONENT NAME=DFLT_COMP\n")
        f.write("$STRUCTURE\n")
        f.write("$COOR\n")
        for k, x in enumerate(points):
            f.write(f"{k + 1} {x[0]} {x[1]} {x[2]}\n")
        # element ids run consecutively across all cell blocks
        eid = 0
        for cell_block in mesh.cells:
            f.write("!\n")
            f.write("$ELEMENT TYPE=" + meshio_to_permas_type[cell_block.type] + "\n")
            order = permas_node_order.get(cell_block.type)
            for row in cell_block.data:
                eid += 1
                nids = row.tolist()
                if order is not None:
                    nids = [nids[i] for i in order]
                # node ids are written 1-based
                nids_strs = (str(nid + 1) for nid in nids)
                f.write(str(eid) + " " + " ".join(nids_strs) + "\n")
        f.write("$END STRUCTURE\n")
        f.write("$EXIT COMPONENT\n")
        f.write("$FIN\n")
register_format(
"permas", [".post", ".post.gz", ".dato", ".dato.gz"], read, {"permas": write}
)
src/meshio/ply/ 0000775 0000000 0000000 00000000000 14562440725 0013746 5 ustar 00root root 0000000 0000000 src/meshio/ply/__init__.py 0000664 0000000 0000000 00000000073 14562440725 0016057 0 ustar 00root root 0000000 0000000 from ._ply import read, write
__all__ = ["read", "write"]
src/meshio/ply/_ply.py 0000664 0000000 0000000 00000042224 14562440725 0015267 0 ustar 00root root 0000000 0000000 """
I/O for the PLY format, cf.
.
.
"""
import collections
import datetime
import re
import sys
import numpy as np
from ..__about__ import __version__
from .._common import warn
from .._exceptions import ReadError, WriteError
from .._files import open_file
from .._helpers import register_format
from .._mesh import CellBlock, Mesh
# Reference dtypes
ply_to_numpy_dtype = {
# [u]char is often used as [u]int, e.g., from Wikipedia:
# > The word 'list' indicates that the data is a list of values, the first of which
# > is the number of entries in the list (represented as a 'uchar' in this case).
"char": np.int8,
"uchar": np.uint8,
"short": np.int16,
"ushort": np.uint16,
"int": np.int32,
"int8": np.int8,
"int32": np.int32,
"int64": np.int64,
"uint": np.uint32,
"uint8": np.uint8,
"uint16": np.uint16,
"uint32": np.uint32,
"uint64": np.uint64,
"float": np.float32,
"float32": np.float32,
"float64": np.float64,
"double": np.float64,
}
numpy_to_ply_dtype = {np.dtype(v): k for k, v in ply_to_numpy_dtype.items()}
def cell_type_from_count(count):
    """Map the number of nodes per face to a meshio cell type name.

    Counts above 4 are treated as generic polygons.
    """
    return {1: "vertex", 2: "line", 3: "triangle", 4: "quad"}.get(count, "polygon")
def read(filename):
    """Read a PLY file (ASCII or binary) and return it as a meshio Mesh."""
    with open_file(filename, "rb") as f:
        return read_buffer(f)
def _next_line(f):
# fast forward to the next significant line
while True:
line = f.readline().decode().strip()
if line and line[:7] != "comment":
break
return line
def read_buffer(f):
    """Parse an open (binary-mode) PLY stream into a meshio Mesh.

    Reads the ASCII header — format line, "element vertex"/"element face"
    declarations and their "property" lines — then dispatches to
    ``_read_ascii`` or ``_read_binary`` for the body.
    """
    # assert that the first line reads `ply`
    line = f.readline().decode().strip()
    if line != "ply":
        raise ReadError("Expected ply")
    line = _next_line(f)
    endianness = None
    if line == "format ascii 1.0":
        is_binary = False
    elif line == "format binary_big_endian 1.0":
        is_binary = True
        endianness = ">"
    else:
        if line != "format binary_little_endian 1.0":
            raise ReadError()
        is_binary = True
        endianness = "<"
    # read header
    line = _next_line(f)
    num_verts = 0
    num_cells = 0
    point_data_formats = []
    point_data_names = []
    cell_data_names = []
    cell_data_dtypes = []
    while line != "end_header":
        m_vert = re.match("element vertex (\\d+)", line)
        m_face = re.match("element face (\\d+)", line)
        if line[:8] == "obj_info":
            # obj_info lines carry no data we use; skip them
            line = _next_line(f)
        elif m_vert is not None:
            num_verts = int(m_vert.groups()[0])
            # read point data: one "property <type> <name>" line per column
            line = _next_line(f)
            while line[:8] == "property":
                m = re.match("property (.+) (.+)", line)
                assert m is not None
                point_data_formats.append(m.groups()[0])
                point_data_names.append(m.groups()[1])
                line = _next_line(f)
        elif m_face is not None:
            num_cells = int(m_face.groups()[0])
            if num_cells < 0:
                raise ReadError(f"Expected positive num_cells (got `{num_cells}`.")
            # read property lists
            line = _next_line(f)
            # read cell data
            while line[:8] == "property":
                if line[:13] == "property list":
                    # "property list <count_type> <value_type> <name>":
                    # store (count_dtype, value_dtype) as a tuple so readers
                    # can distinguish list from scalar properties
                    m = re.match("property list (.+) (.+) (.+)", line)
                    assert m is not None
                    cell_data_dtypes.append(tuple(m.groups()[:-1]))
                else:
                    m = re.match("property (.+) (.+)", line)
                    assert m is not None
                    cell_data_dtypes.append(m.groups()[0])
                cell_data_names.append(m.groups()[-1])
                line = _next_line(f)
        else:
            raise ReadError(
                "Expected `element vertex` or `element face` or `obj_info`, "
                f"got `{line}`"
            )
    if is_binary:
        mesh = _read_binary(
            f,
            endianness,
            point_data_names,
            point_data_formats,
            num_verts,
            num_cells,
            cell_data_names,
            cell_data_dtypes,
        )
    else:
        mesh = _read_ascii(
            f,
            point_data_names,
            point_data_formats,
            num_verts,
            num_cells,
            cell_data_names,
            cell_data_dtypes,
        )
    return mesh
def _read_ascii(
    f,
    point_data_names,
    point_data_formats,
    num_verts,
    num_cells,
    cell_data_names,
    cell_dtypes,
):
    """Read the ASCII body of a PLY file.

    Vertex rows are parsed in one go with np.genfromtxt; face rows are
    parsed line by line because the leading per-row vertex count makes
    them ragged.

    NOTE(review): assumes the first vertex properties are named x, y, z in
    that order — confirm for inputs with unusual property ordering.
    """
    assert len(cell_data_names) == len(cell_dtypes)
    # assert that all formats are the same
    # Now read the data
    dtype = np.dtype(
        [
            (name, ply_to_numpy_dtype[fmt])
            for name, fmt in zip(point_data_names, point_data_formats)
        ]
    )
    pd = np.genfromtxt(f, max_rows=num_verts, dtype=dtype)
    # split off coordinate data and additional point data
    verts = []
    k = 0
    if point_data_names[0] == "x":
        verts.append(pd["x"])
        k += 1
    if point_data_names[1] == "y":
        verts.append(pd["y"])
        k += 1
    if point_data_names[2] == "z":
        verts.append(pd["z"])
        k += 1
    verts = np.column_stack(verts)
    # everything after the coordinate columns is extra per-point data
    point_data = {
        point_data_names[i]: pd[point_data_names[i]]
        for i in range(k, len(point_data_names))
    }
    cell_data = {}
    cell_blocks = []
    for k in range(num_cells):
        line = f.readline().decode().strip()
        data = line.split()
        if k == 0:
            # initialize the cell data arrays
            i = 0
            cell_data = {}
            for name, dtype in zip(cell_data_names, cell_dtypes):
                if name == "vertex_indices":
                    n = int(data[i])
                    i += n + 1
                else:
                    cell_data[name] = collections.defaultdict(list)
                    i += 1
        # go over the line
        i = 0
        n = None
        for name, dtype in zip(cell_data_names, cell_dtypes):
            if name == "vertex_indices":
                # "<count> <id0> <id1> ..."; start a new cell block whenever
                # the per-row count changes
                idx_dtype, value_dtype = dtype
                n = ply_to_numpy_dtype[idx_dtype](data[i])
                dtype = ply_to_numpy_dtype[value_dtype]
                idx = dtype(data[i + 1 : i + n + 1])
                if len(cell_blocks) == 0 or len(cell_blocks[-1][1][-1]) != n:
                    cell_blocks.append((cell_type_from_count(n), [idx]))
                else:
                    cell_blocks[-1][1].append(idx)
                i += n + 1
            else:
                dtype = ply_to_numpy_dtype[dtype]
                # use n from vertex_indices
                assert n is not None
                # NOTE(review): range(i, i + 1) covers a single index, so this
                # appends exactly one value — equivalent to [dtype(data[i])]
                cell_data[name][n] += [dtype(data[j]) for j in range(i, i + 1)]
                i += 1
    # group the collected per-n scalar properties into arrays per cell block
    cell_data = {
        key: [np.array(v) for v in value.values()] for key, value in cell_data.items()
    }
    return Mesh(verts, cell_blocks, point_data=point_data, cell_data=cell_data)
def _read_binary(
    f,
    endianness,
    point_data_names,
    formats,
    num_verts,
    num_cells,
    cell_data_names,
    cell_data_dtypes,
):
    """Read the binary body of a PLY file.

    Parameters
    ----------
    endianness : str
        "<" or ">", taken from the header's format line.
    formats : list of str
        PLY type name for each vertex property, parallel to
        ``point_data_names``.
    """
    # PLY type name -> numpy dtype string. Fixes relative to the previous
    # version: "uchar" is unsigned (it was "i1", which corrupted values
    # >= 128, e.g. uchar colors — the module's reference table maps uchar
    # to np.uint8); "char", "short", "ushort", "int16" and "float64" were
    # missing entirely and raised KeyError on valid files.
    ply_to_numpy_dtype_string = {
        "char": "i1",
        "uchar": "u1",
        "short": "i2",
        "ushort": "u2",
        "uint": "u4",
        "uint8": "u1",
        "uint16": "u2",
        "uint32": "u4",
        "uint64": "u8",
        "int": "i4",
        "int8": "i1",
        "int16": "i2",
        "int32": "i4",
        "int64": "i8",
        "float": "f4",
        "float32": "f4",
        "float64": "f8",
        "double": "f8",
    }
    # read point data as one structured array
    dtype = [
        (name, endianness + ply_to_numpy_dtype_string[fmt])
        for name, fmt in zip(point_data_names, formats)
    ]
    point_data = np.frombuffer(
        f.read(num_verts * np.dtype(dtype).itemsize), dtype=dtype
    )
    verts = np.column_stack([point_data["x"], point_data["y"], point_data["z"]])
    point_data = {
        name: point_data[name]
        for name in point_data_names
        if name not in ["x", "y", "z"]
    }
    # Convert strings to proper numpy dtypes
    dts = [
        (
            (
                endianness + ply_to_numpy_dtype_string[dtype[0]],
                endianness + ply_to_numpy_dtype_string[dtype[1]],
            )
            if isinstance(dtype, tuple)
            else endianness + ply_to_numpy_dtype_string[dtype]
        )
        for dtype in cell_data_dtypes
    ]
    # memoryviews can be sliced and passed around without copying. However, the
    # `bytearray()` call here redundantly copies so that the final output arrays
    # are writeable.
    buffer = memoryview(bytearray(f.read()))
    buffer_position = 0
    cell_data = {}
    for name, dt in zip(cell_data_names, dts):
        if isinstance(dt, tuple):
            # ragged "property list" — parsed row by row
            buffer_increment, cell_data[name] = _read_binary_list(
                buffer[buffer_position:], dt[0], dt[1], num_cells, endianness
            )
        else:
            # NOTE(review): this reads a single scalar, not one per face —
            # per-face scalar properties interleaved with lists are not
            # really supported here; confirm before relying on this path
            buffer_increment = np.dtype(dt).itemsize
            cell_data[name] = np.frombuffer(
                buffer[buffer_position : buffer_position + buffer_increment], dtype=dt
            )[0]
        buffer_position += buffer_increment
    cells = cell_data.pop("vertex_indices", [])
    return Mesh(verts, cells, point_data=point_data, cell_data=cell_data)
def _read_binary_list(buffer, count_dtype, data_dtype, num_cells, endianness):
    """Parse a ply ragged list into a :class:`CellBlock` for each change in row
    length. The only way to know how many bytes the list takes up is to parse
    it. Hence this function also returns the number of bytes consumed.

    Parameters
    ----------
    buffer : memoryview
        The remaining binary body, starting at the first list row.
    count_dtype, data_dtype : str
        Endianness-prefixed numpy dtype strings for the per-row count and
        the vertex indices.

    Returns ``(bytes_consumed, blocks)``.
    """
    count_dtype, data_dtype = np.dtype(count_dtype), np.dtype(data_dtype)
    count_itemsize = count_dtype.itemsize
    data_itemsize = data_dtype.itemsize
    byteorder = "little" if endianness == "<" else "big"
    # Firstly, walk the buffer to extract all start and end ids (in bytes) of
    # each row into `byte_starts_ends`. Using `np.fromiter(generator)` is
    # 2-3x faster than list comprehension or manually populating an array with
    # a for loop. This is still very much the bottleneck - might be worth
    # ctype-ing in future?
    def parse_ragged(start, num_cells):
        at = start
        yield at
        for _ in range(num_cells):
            # int.from_bytes reads the count as unsigned in this call form
            count = int.from_bytes(buffer[at : at + count_itemsize], byteorder)
            at += count * data_itemsize + count_itemsize
            yield at
    # Row `i` is given by `buffer[byte_starts_ends[i]: byte_starts_ends[i+1]]`.
    byte_starts_ends = np.fromiter(parse_ragged(0, num_cells), np.intp, num_cells + 1)
    # Next, find where the row length changes and list the (start, end) row ids
    # of each homogeneous block into `block_bounds`.
    row_lengths = np.diff(byte_starts_ends)
    count_changed_ids = np.nonzero(np.diff(row_lengths))[0] + 1
    block_bounds = []
    start = 0
    for end in count_changed_ids:
        block_bounds.append((start, end))
        start = end
    block_bounds.append((start, len(byte_starts_ends) - 1))
    # Finally, parse each homogeneous block. Constructing an appropriate
    # `block_dtype` to include the initial counts in each row avoids any
    # wasteful copy operations.
    blocks = []
    for start, end in block_bounds:
        if start == end:
            # This should only happen if the element was empty to begin with.
            continue
        block_buffer = buffer[byte_starts_ends[start] : byte_starts_ends[end]]
        cells_per_row = (row_lengths[start] - count_itemsize) // data_itemsize
        block_dtype = np.dtype(
            [("count", count_dtype), ("data", data_dtype * cells_per_row)]
        )
        cells = np.frombuffer(block_buffer, dtype=block_dtype)["data"]
        cell_type = cell_type_from_count(cells.shape[1])
        blocks.append(CellBlock(cell_type, cells))
    # total bytes consumed by this property list
    return byte_starts_ends[-1], blocks
def write(filename, mesh: Mesh, binary: bool = True):  # noqa: C901
    """Write a mesh as a PLY file (binary by default).

    Only vertex/line/triangle/quad/polygon cell blocks are written; others
    are skipped with a warning. int64 cell indices are cast down to int32
    (PLY has no 64-bit integer type). Unlike the previous version, the cast
    is applied to a local copy of the cell-block list, so the caller's
    ``mesh`` is no longer modified.
    """
    with open_file(filename, "wb") as fh:
        fh.write(b"ply\n")
        if binary:
            fh.write(f"format binary_{sys.byteorder}_endian 1.0\n".encode())
        else:
            fh.write(b"format ascii 1.0\n")
        now = datetime.datetime.now().isoformat()
        fh.write(f"comment Created by meshio v{__version__}, {now}\n".encode())
        # counts
        fh.write(f"element vertex {mesh.points.shape[0]:d}\n".encode())
        #
        dim_names = ["x", "y", "z"]
        # The PLY type names: char uchar short ushort int uint float double,
        # or int8 uint8 int16 uint16 int32 uint32 float32 float64.
        # We're adding [u]int64 here.
        type_name_table = {
            np.dtype(np.int8): "int8",
            np.dtype(np.int16): "int16",
            np.dtype(np.int32): "int32",
            np.dtype(np.int64): "int64",
            np.dtype(np.uint8): "uint8",
            np.dtype(np.uint16): "uint16",
            np.dtype(np.uint32): "uint32",
            np.dtype(np.uint64): "uint64",
            np.dtype(np.float32): "float",
            np.dtype(np.float64): "double",
        }
        # coordinate properties (one dtype for all columns)
        type_name = type_name_table[mesh.points.dtype]
        for k in range(mesh.points.shape[1]):
            fh.write(f"property {type_name} {dim_names[k]}\n".encode())
        # 1D point-data arrays become extra vertex properties
        pd = []
        for key, value in mesh.point_data.items():
            if len(value.shape) > 1:
                warn(
                    "PLY writer doesn't support multidimensional point data yet. "
                    f"Skipping {key}."
                )
                continue
            type_name = type_name_table[value.dtype]
            fh.write(f"property {type_name} {key}\n".encode())
            pd.append(value)
        num_cells = 0
        legal_cell_types = ["vertex", "line", "triangle", "quad", "polygon"]
        for cell_block in mesh.cells:
            if cell_block.type in legal_cell_types:
                num_cells += cell_block.data.shape[0]
        # work on a local copy of the cell-block list so the int32 cast below
        # does not alter the input mesh
        cells = list(mesh.cells)
        if num_cells > 0:
            fh.write(f"element face {num_cells:d}\n".encode())
            # possibly cast down to int32
            has_cast = False
            for k, cell_block in enumerate(cells):
                if cell_block.data.dtype == np.int64:
                    has_cast = True
                    cells[k] = CellBlock(
                        cell_block.type, cell_block.data.astype(np.int32)
                    )
            if has_cast:
                warn("PLY doesn't support 64-bit integers. Casting down to 32-bit.")
            # assert that all cell dtypes are equal
            cell_dtype = None
            for cell_block in cells:
                if cell_dtype is None:
                    cell_dtype = cell_block.data.dtype
                if cell_block.data.dtype != cell_dtype:
                    raise WriteError()
            if cell_dtype is not None:
                ply_type = numpy_to_ply_dtype[cell_dtype]
                fh.write(f"property list uint8 {ply_type} vertex_indices\n".encode())
        # TODO other cell data
        fh.write(b"end_header\n")
        if binary:
            # points and point_data
            out = np.rec.fromarrays([coord for coord in mesh.points.T] + pd)
            fh.write(out.tobytes())
            # cells
            for cell_block in cells:
                if cell_block.type not in legal_cell_types:
                    warn(
                        f'cell_type "{cell_block.type}" is not supported by PLY format '
                        "- skipping"
                    )
                    continue
                # prepend each row with its vertex count
                d = cell_block.data
                out = np.rec.fromarrays(
                    [np.broadcast_to(np.uint8(d.shape[1]), d.shape[0]), *d.T]
                )
                fh.write(out.tobytes())
        else:
            # vertices; np.savetxt is slower than formatting by hand
            out = np.rec.fromarrays([coord for coord in mesh.points.T] + pd)
            fmt = " ".join(["{}"] * len(out[0]))
            out = "\n".join([fmt.format(*row) for row in out]) + "\n"
            fh.write(out.encode())
            # cells
            for cell_block in cells:
                if cell_block.type not in legal_cell_types:
                    warn(
                        f'cell_type "{cell_block.type}" is not supported by PLY format '
                        + "- skipping"
                    )
                    continue
                d = cell_block.data
                out = np.column_stack(
                    [np.full(d.shape[0], d.shape[1], dtype=d.dtype), d]
                )
                fmt = " ".join(["{}"] * out.shape[1])
                out = "\n".join([fmt.format(*row) for row in out]) + "\n"
                fh.write(out.encode())
register_format("ply", [".ply"], read, {"ply": write})
src/meshio/stl/ 0000775 0000000 0000000 00000000000 14562440725 0013744 5 ustar 00root root 0000000 0000000 src/meshio/stl/__init__.py 0000664 0000000 0000000 00000000073 14562440725 0016055 0 ustar 00root root 0000000 0000000 from ._stl import read, write
__all__ = ["read", "write"]
src/meshio/stl/_stl.py 0000664 0000000 0000000 00000017720 14562440725 0015266 0 ustar 00root root 0000000 0000000 """
I/O for the STL format, cf.
.
"""
from __future__ import annotations
import os
import numpy as np
from ..__about__ import __version__
from .._common import warn
from .._exceptions import ReadError
from .._files import open_file
from .._helpers import register_format
from .._mesh import CellBlock, Mesh
def read(filename):
with open_file(filename, "rb") as f:
# Checking if the file is ASCII format is normally done by checking if the
# first 5 characters of the header is "solid".
# ```
# header = f.read(80).decode()
# ```
# Unfortunately, there are mesh files out there which are binary and still put
# "solid" there.
# A suggested alternative is to pretend the file is binary, read the
# num_triangles and see if it matches the file size
# (https://stackoverflow.com/a/7394842/353337).
filesize_bytes = os.path.getsize(filename)
if filesize_bytes < 80:
return _read_ascii(f)
f.read(80)
num_triangles = np.fromfile(f, count=1, dtype=".
def iter_loadtxt(
    infile,
    skiprows: int = 0,
    comments: str | tuple[str, ...] = "#",
    dtype=float,
    usecols: tuple[int] | None = None,
):
    """Stream whitespace-separated numbers from a binary file into an array.

    Keeps only the last three tokens of each non-comment line (the numeric
    payload of STL "vertex"/"facet normal" lines) and returns the values
    reshaped to one row per input line. The detected row length is also
    recorded on the function object as ``iter_loadtxt.rowlength``.
    """

    def emit_values():
        last_row = None
        for _ in range(skiprows):
            try:
                next(infile)
            except StopIteration:
                raise ReadError("EOF Skipped too many rows")
        for raw in infile:
            text = raw.decode().strip()
            if text.startswith(comments):
                continue
            # drop any leading text tokens, keep the numeric tail
            last_row = text.split()[-3:]
            columns = usecols if usecols is not None else range(len(last_row))
            for col in columns:
                yield dtype(last_row[col])
        if last_row is None:
            iter_loadtxt.rowlength = 3
        elif usecols is None:
            iter_loadtxt.rowlength = len(last_row)
        else:
            iter_loadtxt.rowlength = len(usecols)

    data = np.fromiter(emit_values(), dtype=dtype)
    return data.reshape((-1, iter_loadtxt.rowlength))
def _read_ascii(f):
    """Read an ASCII STL body from the (binary-mode) file object ``f``.

    Returns a Mesh with triangle cells and a "facet_normals" cell-data
    array holding the normals stated in the file.
    """
    # The file has the form
    # ```
    # solid foo
    # facet normal 0.455194 -0.187301 -0.870469
    #  outer loop
    #   vertex 266.36 234.594 14.6145
    #   vertex 268.582 234.968 15.6956
    #   vertex 267.689 232.646 15.7283
    #  endloop
    # endfacet
    # # [...] more facets [...]
    # endsolid
    # ```
    # In the interest of speed, don't verify the format and instead just skip the text.
    # TODO Pandas is MUCH faster than numpy for i/o, see
    # .
    # import pandas
    # data = pandas.read_csv(
    #     f,
    #     skiprows=lambda row: row == 0 or (row - 1) % 7 in [0, 1, 5, 6],
    #     skipfooter=1,
    #     usecols=(1, 2, 3),
    # )
    # np.loadtxt is super slow
    # data = np.loadtxt(
    #     f,
    #     comments=["solid", "facet", "outer loop", "endloop", "endfacet", "endsolid"],
    #     usecols=(1, 2, 3),
    # )
    # "facet" is deliberately NOT in the comments tuple: "facet normal ..."
    # lines carry the normal components, which are parsed as data rows
    data = iter_loadtxt(
        f,
        comments=("solid", "outer loop", "endloop", "endfacet", "endsolid"),
        # usecols=(1, 2, 3),
    )
    # each facet contributes exactly 4 rows: 1 normal + 3 vertices
    if data.shape[0] % 4 != 0:
        raise ReadError()
    # split off the facet normals
    facet_rows = np.zeros(len(data), dtype=bool)
    facet_rows[0::4] = True
    facet_normals = data[facet_rows]
    data = data[~facet_rows]
    if data.shape[0] == 0:
        # NOTE(review): `cells = {}` is a dict here but a list in the
        # non-empty branch — confirm downstream handles both
        points = []
        cells = {}
        cell_data = {}
    else:
        # group the remaining vertex rows back into (3, 3) facets
        facets = np.split(data, data.shape[0] // 3)
        points, cells = data_from_facets(facets)
        cell_data = {"facet_normals": [facet_normals]}
    return Mesh(points, cells, cell_data=cell_data)
def data_from_facets(facets):
    """Merge per-facet corner coordinates into unique points plus triangle
    cells.

    Parameters
    ----------
    facets : list
        One (3, 3) float array per triangle, a row of xyz per corner.
    """
    # Now, all facets contain the point coordinate. Try to identify individual points
    # and build the data arrays.
    if len(facets) == 0:
        points = np.empty((0, 3), dtype=float)
        cells = []
    else:
        pts = np.concatenate(facets)
        # TODO equip `unique()` with a tolerance
        # Use return_index so we can use sort on `idx` such that the order is
        # preserved; see .
        _, idx, inv = np.unique(pts, axis=0, return_index=True, return_inverse=True)
        # `idx[k]` lists the first occurrence of each unique point in file
        # order; `inv_k` is the inverse permutation of `k`, remapping the
        # unique-order indices in `inv` to the reordered `points` rows
        k = np.argsort(idx)
        points = pts[idx[k]]
        inv_k = np.argsort(k)
        cells = [CellBlock("triangle", inv_k[inv].reshape(-1, 3))]
    return points, cells
def _read_binary(f, num_triangles: int):
# for each triangle, one has 3 float32 (facet normal), 9 float32 (facet), and 1
# int16 (attribute count)
out = np.fromfile(
f,
count=num_triangles,
dtype=np.dtype(
[("normal", " 1:
invalid = {block.type for block in mesh.cells if block.type != "triangle"}
invalid = ", ".join(invalid)
warn(f"STL can only write triangle cells. Discarding {invalid}.")
if mesh.points.shape[1] == 2:
warn(
"STL requires 3D points, but 2D points given. Appending 0 third component."
)
points = np.column_stack([mesh.points, np.zeros_like(mesh.points[:, 0])])
else:
points = mesh.points
pts = points[mesh.get_cells_type("triangle")]
if "facet_normals" in mesh.cell_data:
normals = mesh.get_cell_data("facet_normals", "triangle")
else:
normals = np.cross(pts[:, 1] - pts[:, 0], pts[:, 2] - pts[:, 0])
nrm = np.sqrt(np.einsum("ij,ij->i", normals, normals))
normals = (normals.T / nrm).T
fun = _write_binary if binary else _write_ascii
fun(filename, pts, normals)
def _write_ascii(filename, pts, normals):
    """Write triangles and their facet normals as an ASCII STL file.

    ``pts`` is iterated as (n, 3, 3) corner coordinates, ``normals`` as
    (n, 3) facet normals.
    """
    with open_file(filename, "w") as fh:
        fh.write("solid\n")
        for triangle, normal in zip(pts, normals):
            fh.write("facet normal {} {} {}\n".format(*normal))
            fh.write(" outer loop\n")
            for corner in triangle:
                fh.write("  vertex {} {} {}\n".format(*corner))
            fh.write(" endloop\n")
            fh.write("endfacet\n")
        fh.write("endsolid\n")
def _write_binary(filename, pts, normals):
with open_file(filename, "wb") as fh:
# 80 character header data
msg = f"This file was generated by meshio v{__version__}."
msg += (79 - len(msg)) * "X"
msg += "\n"
fh.write(msg.encode())
fh.write(np.array(len(pts)).astype("