pax_global_header comment=be6becb232ae94b814702a23f757014b658b3fbd
meshio-1.11.7/.circleci/config.yml
version: 2
jobs:
  # python2:
  #   working_directory: ~/meshio
  #   docker:
  #     - image: ubuntu:17.10
  #   steps:
  #     - run: apt-get update
  #     - run: apt-get install -y git python-h5py python-numpy python-vtk6 python-pip
  #     - run: pip2 install -U pytest pytest-cov pylint
  #     - checkout
  #     - run: pip2 install .
  #     # The actual test
  #     - run: pylint meshio/
  #     - run: pylint test/*py
  #     # No Exodus tests on VTK6 -- they segfault
  #     - run: rm -f test/test_exodus.py
  #     # XDMF3 is not part of VTK6
  #     - run: rm -f test/test_xdmf3.py
  #     # ASCII VTK tests fail with Python2/VTK6
  #     - run: rm -f test/test_vtk.py
  #     - run: cd test/ && pytest --cov meshio
  # python3:
  build:
    working_directory: ~/meshio
    docker:
      - image: ubuntu:17.10
    steps:
      - run: apt-get update
      - run: apt-get install -y software-properties-common
      - run: LANG=C.UTF-8 apt-add-repository -y ppa:nschloe/vtk7
      - run: apt-get update
      - run: apt-get install -y git python3-h5py python3-netcdf4 python3-vtk7 python3-pip
      - run: pip3 install -U pytest pytest-cov pylint
      - checkout
      - run: pip3 install .[exodus,hdf5]
      # Make sure to get numpy 1.13.0
      - run: pip3 install -U numpy
      # make sure that rst converts correctly
      - run: apt install -y pandoc
      - run: pip3 install docutils pygments
      - run: make README.rst
      - run: python3 setup.py check -r -s
      # The actual test
      - run: pylint meshio/
      - run: pylint test/*py
      - run: pylint tools/meshio-convert
      - run: cd test/ && pytest --cov meshio
      # submit to codecov
      - run: apt-get install -y curl
      - run: bash <(curl -s https://codecov.io/bash)

# workflow builds don't support forked PRs yet, cf.
# .
# workflows:
#   version: 2
#   build-and-test:
#     jobs:
#       - python2
#       - python3
meshio-1.11.7/.github/ISSUE_TEMPLATE.md
If you're having problems converting from or to a mesh format, remember to
attach a mesh that shows the problem. This will enable other people to
reproduce and fix the problem.
meshio-1.11.7/.gitignore
*.bin
*.dat
*.dato
*.e
*.geo
*.h5
*.h5m
*.msh
*.off
*.pvtu
*.pyc
*.vtk
*.vtu
*.xdmf
*.xmf
*.xml
.cache/
MANIFEST
README.rst
build/
dist/
doc/_build/
*.egg-info/
meshio-1.11.7/.pylintrc
[MESSAGES CONTROL]
disable=
    invalid-name,
    too-many-locals,
    missing-docstring,
    too-many-arguments,
    too-many-statements,
    no-member,
    bad-continuation,
    unused-argument,
    too-many-branches,
    duplicate-code,
    fixme,
    wildcard-import,
    locally-disabled
meshio-1.11.7/.travis.yml
dist: trusty
language: python
python:
  - '2.7'
  - '3.4'
virtualenv:
  system_site_packages: true
addons:
  apt:
    packages:
      - python-h5py
      - python3-h5py
      - python-numpy
      - python3-numpy
      - python-vtk6
before_install:
  - pip install -U pytest pytest-cov
install:
  - pip install .
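# (Running from within test/ presumably makes pytest import the meshio
# installed above rather than the source checkout.)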
script:
  - cd test/ && pytest --cov meshio
after_success:
  - bash <(curl -s https://codecov.io/bash)
meshio-1.11.7/LICENSE.txt
The MIT License (MIT)

Copyright (c) 2015-2018 Nico Schlömer

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
meshio-1.11.7/Makefile
VERSION=$(shell python3 -c "import meshio; print(meshio.__version__)")

default:
	@echo "\"make publish\"?"

README.rst: README.md
	cat README.md | sed -e 's_<img src="\([^"]*\)" width="\([^"]*\)">_![](\1){width="\2"}_g' -e 's_<p[^>]*>__g' -e 's_<br/>__g' > /tmp/README.md
	pandoc /tmp/README.md -o README.rst
	python3 setup.py check -r -s || exit 1

upload: setup.py README.rst
	# Make sure we're on the master branch
	@if [ "$(shell git rev-parse --abbrev-ref HEAD)" != "master" ]; then exit 1; fi
	rm -f dist/*
	python3 setup.py bdist_wheel --universal
	gpg --detach-sign -a dist/*
	twine upload dist/*

tag:
	@if [ "$(shell git rev-parse --abbrev-ref HEAD)" != "master" ]; then exit 1; fi
	@echo "Tagging v$(VERSION)..."
	git tag v$(VERSION)
	git push --tags

publish: tag upload

clean:
	rm -f README.rst
meshio-1.11.7/README.md
# meshio

[![CircleCI](https://img.shields.io/circleci/project/github/nschloe/meshio/master.svg)](https://circleci.com/gh/nschloe/meshio)
[![codecov](https://img.shields.io/codecov/c/github/nschloe/meshio.svg)](https://codecov.io/gh/nschloe/meshio)
[![PyPi Version](https://img.shields.io/pypi/v/meshio.svg)](https://pypi.python.org/pypi/meshio)
[![GitHub stars](https://img.shields.io/github/stars/nschloe/meshio.svg?style=social&label=Stars)](https://github.com/nschloe/meshio)
<br/>

There are various mesh formats available for representing unstructured meshes, e.g.,

 * [ANSYS msh](http://www.afs.enea.it/fluent/Public/Fluent-Doc/PDF/chp03.pdf)
 * [DOLFIN XML](http://manpages.ubuntu.com/manpages/wily/man1/dolfin-convert.1.html)
 * [Exodus](https://cubit.sandia.gov/public/13.2/help_manual/WebHelp/finite_element_model/exodus/block_specification.htm)
 * [H5M](https://www.mcs.anl.gov/~fathom/moab-docs/h5mmain.html)
 * [Medit](https://people.sc.fsu.edu/~jburkardt/data/medit/medit.html)
 * [MED/Salome](http://docs.salome-platform.org/latest/dev/MEDCoupling/med-file.html)
 * [Gmsh](http://gmsh.info/doc/texinfo/gmsh.html#File-formats)
 * [OFF](http://segeval.cs.princeton.edu/public/off_format.html)
 * [PERMAS](http://www.intes.de)
 * [STL](https://en.wikipedia.org/wiki/STL_(file_format))
 * [VTK](https://www.vtk.org/wp-content/uploads/2015/04/file-formats.pdf)
 * [VTU](https://www.vtk.org/Wiki/VTK_XML_Formats)
 * [XDMF](http://www.xdmf.org/index.php/XDMF_Model_and_Format)

meshio can read and write all of these formats and smoothly converts between them.
Simply call
```
meshio-convert input.msh output.vtu
```
with any of the supported formats.

In Python, simply call
```python
points, cells, point_data, cell_data, field_data = \
    meshio.read(args.infile)
```
to read a mesh. To write, do
```python
points = numpy.array([
    [0.0, 0.0, 0.0],
    [0.0, 1.0, 0.0],
    [0.0, 0.0, 1.0],
    ])
cells = {
    'triangle': numpy.array([
        [0, 1, 2]
        ])
    }
meshio.write(
    'foo.vtk',
    points,
    cells,
    # Optionally provide extra data on points, cells, etc.
    # point_data=point_data,
    # cell_data=cell_data,
    # field_data=field_data
    )
```
For both input and output, you can optionally specify the exact `file_format`
(in case you would like to enforce binary over ASCII VTK, for example); see
the example at the end of this README.

### Installation

meshio is [available from the Python Package Index](https://pypi.python.org/pypi/meshio/), so simply type
```
pip install -U meshio
```
to install or upgrade.

### Usage

Just
```
import meshio
```
and make use of all the goodies the module provides.

### Testing

To run the meshio unit tests, check out this repository and type
```
pytest
```

### Distribution

To create a new release

1. bump the `__version__` number,

2. tag and upload to PyPi:
    ```
    make publish
    ```

### License

meshio is published under the [MIT license](https://en.wikipedia.org/wiki/MIT_License).
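### Example: enforcing a file format

The `file_format` argument mentioned above overrides the format that would
otherwise be deduced from the file extension. A minimal sketch (the file names
are made up):
```python
import meshio
import numpy

points = numpy.array([
    [0.0, 0.0, 0.0],
    [0.0, 1.0, 0.0],
    [0.0, 0.0, 1.0],
    ])
cells = {'triangle': numpy.array([[0, 1, 2]])}

# '.vtk' would normally give binary VTK; request ASCII explicitly.
meshio.write('foo.vtk', points, cells, file_format='vtk-ascii')

# The same keyword works when reading.
points, cells, point_data, cell_data, field_data = \
    meshio.read('foo.vtk', file_format='vtk-ascii')
```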
meshio-1.11.7/codecov.yml
comment: no
# https://github.com/codecov/support/issues/396#issuecomment-300879528
codecov:
  disable_default_path_fixes: true
fixes:
  - ".*/dist-packages/::"
meshio-1.11.7/meshio/__about__.py
# -*- coding: utf-8 -*-
#
__version__ = '1.11.7'

__author__ = u'Nico Schlömer'
__author_email__ = 'nico.schloemer@gmail.com'
__copyright__ = \
    u'Copyright (c) 2015-2018, {} <{}>'.format(__author__, __author_email__)
__website__ = 'https://github.com/nschloe/meshio'
__license__ = 'License :: OSI Approved :: MIT License'
__status__ = 'Development Status :: 5 - Production/Stable'
meshio-1.11.7/meshio/__init__.py
# -*- coding: utf-8 -*-
#
from __future__ import print_function

from .__about__ import (
    __version__,
    __author__,
    __author_email__,
    __website__
    )

# pylint: disable=wildcard-import
from .helpers import *

try:
    import pipdate
except ImportError:
    pass
else:
    if pipdate.needs_checking(__name__):
        print(pipdate.check(__name__, __version__))
meshio-1.11.7/meshio/ansys_io.py
# -*- coding: utf-8 -*-
#
'''
I/O for Ansys's msh format, cf.
<http://www.afs.enea.it/fluent/Public/Fluent-Doc/PDF/chp03.pdf>.
'''
import logging
import re

import numpy

from .__about__ import __version__


def _skip_to(f, char):
    c = None
    while c != char:
        c = f.read(1).decode('utf-8')
    return


def _skip_close(f, num_open_brackets):
    while num_open_brackets > 0:
        char = f.read(1).decode('utf-8')
        if char == '(':
            num_open_brackets += 1
        elif char == ')':
            num_open_brackets -= 1
    return


def _read_points(f, line, first_point_index_overall, last_point_index):
    # If the line is self-contained, it is merely a declaration
    # of the total number of points.
    if line.count('(') == line.count(')'):
        return None, None, None

    # (3010 (zone-id first-index last-index type ND)
    out = re.match('\\s*\\(\\s*(|20|30)10\\s*\\(([^\\)]*)\\).*', line)
    a = [int(num, 16) for num in out.group(2).split()]

    assert len(a) > 4
    first_point_index = a[1]
    # store the very first point index
    if first_point_index_overall is None:
        first_point_index_overall = first_point_index
    # make sure that point arrays are subsequent
    if last_point_index is not None:
        assert last_point_index + 1 == first_point_index
    last_point_index = a[2]
    num_points = last_point_index - first_point_index + 1
    dim = a[4]

    # Skip ahead to the byte that opens the data block (might
    # be the current line already).
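    # For instance, a hypothetical ASCII header "(10 (1 1 2d5 1 3)(" declares
    # zone 1 with points 0x1 through 0x2d5 (all indices are hexadecimal),
    # type 1 and dimension 3; its trailing "(" already opens the data block.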
    last_char = line.strip()[-1]
    while last_char != '(':
        last_char = f.read(1).decode('utf-8')

    if out.group(1) == '':
        # ASCII data
        pts = numpy.empty((num_points, dim))
        for k in range(num_points):
            # skip ahead to the first line with data
            line = ''
            while line.strip() == '':
                line = f.readline().decode('utf-8')
            dat = line.split()
            assert len(dat) == dim
            for d in range(dim):
                pts[k][d] = float(dat[d])
    else:
        # binary data
        if out.group(1) == '20':
            dtype = numpy.float32
            bytes_per_item = 4
        else:
            assert out.group(1) == '30'
            dtype = numpy.float64
            bytes_per_item = 8
        # read point data
        total_bytes = dim * bytes_per_item * num_points
        pts = numpy.fromstring(
            f.read(total_bytes), dtype=dtype
            ).reshape((num_points, dim))

    # make sure that the data set is properly closed
    _skip_close(f, 2)
    return pts, first_point_index_overall, last_point_index


def _read_cells(f, line):
    # If the line is self-contained, it is merely a declaration of the total
    # number of points.
    if line.count('(') == line.count(')'):
        return None, None

    out = re.match('\\s*\\(\\s*(|20|30)12\\s*\\(([^\\)]+)\\).*', line)
    a = [int(num, 16) for num in out.group(2).split()]

    assert len(a) > 4
    first_index = a[1]
    last_index = a[2]
    num_cells = last_index - first_index + 1
    element_type = a[4]

    element_type_to_key_num_nodes = {
        0: ('mixed', None),
        1: ('triangle', 3),
        2: ('tetra', 4),
        3: ('quad', 4),
        4: ('hexahedron', 8),
        5: ('pyra', 5),
        6: ('wedge', 6),
        }

    key, num_nodes_per_cell = \
        element_type_to_key_num_nodes[element_type]

    # Skip to the opening `(` and make sure that there's no non-whitespace
    # character between the last closing bracket and the `(`.
    if line.strip()[-1] != '(':
        c = None
        while True:
            c = f.read(1).decode('utf-8')
            if c == '(':
                break
            if not re.match('\\s', c):
                # Found a non-whitespace character before `(`.
                # Assume this is just a declaration line then and
                # skip to the closing bracket.
                _skip_to(f, ')')
                return None, None

    assert key != 'mixed'

    # read cell data
    if out.group(1) == '':
        # ASCII cells
        data = numpy.empty(
            (num_cells, num_nodes_per_cell), dtype=int
            )
        for k in range(num_cells):
            line = f.readline().decode('utf-8')
            dat = line.split()
            assert len(dat) == num_nodes_per_cell
            data[k] = [int(d, 16) for d in dat]
    else:
        # binary cells
        if out.group(1) == '20':
            bytes_per_item = 4
            dtype = numpy.int32
        else:
            assert out.group(1) == '30'
            bytes_per_item = 8
            dtype = numpy.int64
        total_bytes = \
            bytes_per_item * num_nodes_per_cell * num_cells
        data = numpy.fromstring(
            f.read(total_bytes),
            count=(num_nodes_per_cell*num_cells),
            dtype=dtype
            ).reshape((num_cells, num_nodes_per_cell))

    # make sure that the data set is properly closed
    _skip_close(f, 2)
    return key, data


def _read_faces(f, line):
    # faces
    # (13 (zone-id first-index last-index type element-type))

    # If the line is self-contained, it is merely a declaration of
    # the total number of points.
    if line.count('(') == line.count(')'):
        return {}

    out = re.match('\\s*\\(\\s*(|20|30)13\\s*\\(([^\\)]+)\\).*', line)
    a = [int(num, 16) for num in out.group(2).split()]

    assert len(a) > 4
    first_index = a[1]
    last_index = a[2]
    num_cells = last_index - first_index + 1
    element_type = a[4]

    element_type_to_key_num_nodes = {
        0: ('mixed', None),
        2: ('line', 2),
        3: ('triangle', 3),
        4: ('quad', 4)
        }

    key, num_nodes_per_cell = \
        element_type_to_key_num_nodes[element_type]

    # Skip ahead to the line that opens the data block (might be
    # the current line already).
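    # (As everywhere in this format, the entries below are hexadecimal. A
    # hypothetical ASCII line "a b 1f 2 0" in a triangle zone would mean
    # nodes 0xa, 0xb, 0x1f with adjacent cells 0x2 and 0x0.)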
    if line.strip()[-1] != '(':
        _skip_to(f, '(')

    data = {}
    if out.group(1) == '':
        # ASCII
        if key == 'mixed':
            # From
            # :
            # > If the face zone is of mixed type (element-type =
            # > 0), the body of the section will include the face
            # > type and will appear as follows
            # >
            # > type v0 v1 v2 c0 c1
            # >
            for k in range(num_cells):
                line = ''
                while line.strip() == '':
                    line = f.readline().decode('utf-8')
                dat = line.split()
                type_index = int(dat[0], 16)
                assert type_index != 0
                type_string, num_nodes_per_cell = \
                    element_type_to_key_num_nodes[type_index]
                assert len(dat) == num_nodes_per_cell + 3

                if type_string not in data:
                    data[type_string] = []

                data[type_string].append([
                    int(d, 16) for d in dat[1:num_nodes_per_cell+1]
                    ])

            data = {key: numpy.array(data[key]) for key in data}
        else:
            # read cell data
            data = numpy.empty(
                (num_cells, num_nodes_per_cell), dtype=int
                )
            for k in range(num_cells):
                line = f.readline().decode('utf-8')
                dat = line.split()
                # The body of a regular face section contains the
                # grid connectivity, and each line appears as
                # follows:
                #   n0 n1 n2 cr cl
                # where n* are the defining nodes (vertices) of the
                # face, and c* are the adjacent cells.
                assert len(dat) == num_nodes_per_cell + 2
                data[k] = [
                    int(d, 16) for d in dat[:num_nodes_per_cell]
                    ]
            data = {key: data}
    else:
        # binary
        if out.group(1) == '20':
            bytes_per_item = 4
            dtype = numpy.int32
        else:
            assert out.group(1) == '30'
            bytes_per_item = 8
            dtype = numpy.int64

        assert key != 'mixed'

        # Read cell data.
        # The body of a regular face section contains the grid
        # connectivity, and each line appears as follows:
        #   n0 n1 n2 cr cl
        # where n* are the defining nodes (vertices) of the face,
        # and c* are the adjacent cells.
        total_bytes = \
            num_cells * bytes_per_item * (num_nodes_per_cell + 2)
        data = numpy.fromstring(
            f.read(total_bytes), dtype=dtype
            ).reshape((num_cells, num_nodes_per_cell + 2))
        # Cut off the adjacent cell data.
        data = data[:, :num_nodes_per_cell]
        data = {key: data}

    # make sure that the data set is properly closed
    _skip_close(f, 2)
    return data


def read(filename):
    # Initialize the data optional data fields
    field_data = {}
    cell_data = {}
    point_data = {}

    points = []
    cells = {}

    first_point_index_overall = None
    last_point_index = None

    # read file in binary mode since some data might be binary
    with open(filename, 'rb') as f:
        while True:
            line = f.readline().decode('utf-8')
            if not line:
                break

            if line.strip() == '':
                continue

            # expect the line to have the form
            #   (<index> [...]
            out = re.match('\\s*\\(\\s*([0-9]+).*', line)
            assert out
            index = out.group(1)

            if index == '0':
                # Comment.
                _skip_close(f, line.count('(') - line.count(')'))
            elif index == '1':
                # header
                # (1 "")
                _skip_close(f, line.count('(') - line.count(')'))
            elif index == '2':
                # dimensionality
                # (2 3)
                _skip_close(f, line.count('(') - line.count(')'))
            elif re.match('(|20|30)10', index):
                # points
                pts, first_point_index_overall, last_point_index = \
                    _read_points(
                        f, line, first_point_index_overall, last_point_index
                        )
                if pts is not None:
                    points.append(pts)
            elif re.match('(|20|30)12', index):
                # cells
                # (2012 (zone-id first-index last-index type element-type))
                key, data = _read_cells(f, line)
                if data is not None:
                    cells[key] = data
            elif re.match('(|20|30)13', index):
                data = _read_faces(f, line)
                for key in data:
                    if key in cells:
                        cells[key] = numpy.concatenate([cells[key], data[key]])
                    else:
                        cells[key] = data[key]
            elif index == '39':
                logging.warning(
                    'Zone specification not supported yet. Skipping.'
                    )
                _skip_close(f, line.count('(') - line.count(')'))
            elif index == '45':
                # (45 (2 fluid solid)())
                obj = re.match(
                    '\\(45 \\([0-9]+ ([\\S]+) ([\\S]+)\\)\\(\\)\\)', line
                    )
                if obj:
                    logging.warning(
                        'Zone specification not supported yet (%r, %r). '
                        'Skipping.',
                        obj.group(1), obj.group(2)
                        )
                else:
                    logging.warning('Zone specification not supported yet.')
            else:
                logging.warning('Unknown index %r. Skipping.', index)
                # Skipping ahead to the next line with two closing brackets.
                _skip_close(f, line.count('(') - line.count(')'))

    points = numpy.concatenate(points)

    # Gauge the cells with the first point_index.
    for key in cells:
        cells[key] -= first_point_index_overall

    return points, cells, point_data, cell_data, field_data


def write(
        filename,
        points,
        cells,
        point_data=None,
        cell_data=None,
        field_data=None,
        write_binary=True,
        ):
    point_data = {} if point_data is None else point_data
    cell_data = {} if cell_data is None else cell_data
    field_data = {} if field_data is None else field_data

    with open(filename, 'wb') as fh:
        # header
        fh.write(('(1 "meshio {}")\n'.format(__version__)).encode('utf8'))

        # dimension
        dim = 2 if all(points[:, 2] == 0.0) else 3
        fh.write(('(2 {})\n'.format(dim)).encode('utf8'))

        # total number of nodes
        first_node_index = 1
        fh.write((
            '(10 (0 {:x} {:x} 0))\n'.format(first_node_index, len(points))
            ).encode('utf8'))

        # total number of cells
        total_num_cells = sum([len(c) for c in cells])
        fh.write((
            '(12 (0 1 {:x} 0))\n'.format(total_num_cells)
            ).encode('utf8'))

        # Write nodes
        key = '3010' if write_binary else '10'
        fh.write((
            '({} (1 {:x} {:x} 1 {:x}))(\n'.format(
                key, first_node_index, points.shape[0], points.shape[1]
                )).encode('utf8'))
        if write_binary:
            fh.write(points.tostring())
            fh.write('\n)'.encode('utf8'))
            fh.write('End of Binary Section 3010)\n'.encode('utf8'))
        else:
            numpy.savetxt(fh, points, fmt='%.15e')
            fh.write(('))\n').encode('utf8'))

        # Write cells
        meshio_to_ansys_type = {
            'triangle': 1,
            'tetra': 2,
            'quad': 3,
            'hexahedron': 4,
            'pyra': 5,
            'wedge': 6,
            }
        first_index = 0
        binary_dtypes = {
            # numpy.int16 is not allowed
            numpy.dtype('int32'): '2012',
            numpy.dtype('int64'): '3012',
            }
        for cell_type, values in cells.items():
            key = binary_dtypes[values.dtype] if write_binary else '12'
            last_index = first_index + len(values) - 1
            fh.write((
                '({} (1 {:x} {:x} 1 {})(\n'.format(
                    key, first_index, last_index,
                    meshio_to_ansys_type[cell_type]
                    )
                ).encode('utf8'))
            if write_binary:
                fh.write((values + first_node_index).tostring())
                fh.write('\n)'.encode('utf8'))
                fh.write((
                    'End of Binary Section {})\n'.format(key)
                    ).encode('utf8'))
            else:
                numpy.savetxt(fh, values + first_node_index, fmt='%x')
                fh.write(('))\n').encode('utf8'))
            first_index = last_index + 1

    return
meshio-1.11.7/meshio/dolfin_io.py
# -*- coding: utf-8 -*-
#
'''
I/O for DOLFIN's XML format, cf.
<http://manpages.ubuntu.com/manpages/wily/man1/dolfin-convert.1.html>.

.. moduleauthor:: Nico Schlömer <nico.schloemer@gmail.com>
'''
import logging
import os
import re
import xml.etree.cElementTree as ET

import numpy


def _read_mesh(filename):
    dolfin_to_meshio_type = {
        'triangle': ('triangle', 3),
        'tetrahedron': ('tetra', 4),
        }

    # Use iterparse() to avoid loading the entire file via parse(). iterparse()
    # allows to discard elements (via clear()) after they have been processed.
    # See .
    for event, elem in ET.iterparse(filename, events=('start', 'end')):
        if event == 'end':
            continue

        if elem.tag == 'dolfin':
            # Don't be too strict with the assertion. Some mesh files don't
            # have the proper tags.
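            # For reference, a conforming file starts out roughly like
            #   <dolfin xmlns:dolfin="https://fenicsproject.org/">
            #     <mesh celltype="triangle" dim="2">
            #       <vertices size="9">
            #         <vertex index="0" x="0.0" y="0.0" z="0.0"/>
            # (a sketch only -- these are exactly the attributes read below).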
            # assert elem.attrib['nsmap'] \
            #     == '{\'dolfin\': \'https://fenicsproject.org/\'}'
            pass
        elif elem.tag == 'mesh':
            dim = int(elem.attrib['dim'])
            cell_type, npc = dolfin_to_meshio_type[elem.attrib['celltype']]
        elif elem.tag == 'vertices':
            points = numpy.empty((int(elem.attrib['size']), 3))
            keys = ['x', 'y']
            if dim == 2:
                points[:, 2] = 0.0
            else:
                assert dim == 3
                keys += ['z']
        elif elem.tag == 'vertex':
            k = int(elem.attrib['index'])
            points[k][:dim] = [float(elem.attrib[key]) for key in keys]
        elif elem.tag == 'cells':
            cells = {
                cell_type:
                    numpy.empty((int(elem.attrib['size']), npc), dtype=int)
                }
        elif elem.tag in ['triangle', 'tetrahedron']:
            k = int(elem.attrib['index'])
            cells[cell_type][k] = [
                int(elem.attrib['v{}'.format(i)]) for i in range(npc)
                ]
        else:
            logging.warning('Unknown entry %s. Ignoring.', elem.tag)

        elem.clear()

    return points, cells, cell_type


def _read_cell_data(filename, cell_type):
    dolfin_type_to_numpy_type = {
        'int': numpy.dtype('int'),
        'float': numpy.dtype('float'),
        'uint': numpy.dtype('uint'),
        }

    cell_data = {cell_type: {}}
    dir_name = os.path.dirname(filename)
    if not os.path.dirname(filename):
        dir_name = os.getcwd()

    # Loop over all files in the same directory as `filename`.
    basename = os.path.splitext(filename)[0]
    for f in os.listdir(dir_name):
        # Check if there are files by the name "<filename>_*.xml"; if yes,
        # extract the * pattern and make it the name of the data set.
        out = re.match('{}_([^\\.]+)\\.xml'.format(basename), f)
        if not out:
            continue
        name = out.group(1)
        tree = ET.parse(f)
        root = tree.getroot()
        mesh_functions = list(root)
        assert len(mesh_functions) == 1
        mesh_function = mesh_functions[0]

        assert mesh_function.tag == 'mesh_function'
        size = int(mesh_function.attrib['size'])
        dtype = dolfin_type_to_numpy_type[mesh_function.attrib['type']]
        data = numpy.empty(size, dtype=dtype)
        for child in mesh_function:
            assert child.tag == 'entity'
            idx = int(child.attrib['index'])
            data[idx] = child.attrib['value']

        cell_data[cell_type][name] = data

    return cell_data


def read(filename):
    points, cells, cell_type = _read_mesh(filename)
    point_data = {}
    cell_data = _read_cell_data(filename, cell_type)
    field_data = {}
    return points, cells, point_data, cell_data, field_data


def _write_mesh(
        filename,
        points,
        cell_type,
        cells
        ):
    stripped_cells = {cell_type: cells[cell_type]}

    dolfin = ET.Element(
        'dolfin',
        nsmap={'dolfin': 'https://fenicsproject.org/'}
        )

    meshio_to_dolfin_type = {
        'triangle': 'triangle',
        'tetra': 'tetrahedron',
        }

    if len(cells) > 1:
        discarded_cells = list(cells.keys())
        discarded_cells.remove(cell_type)
        logging.warning(
            'DOLFIN XML can only handle one cell type at a time. '
            'Using %s, discarding %s.',
            cell_type,
            ', '.join(discarded_cells)
            )

    dim = 2 if all(points[:, 2] == 0) else 3

    mesh = ET.SubElement(
        dolfin,
        'mesh',
        celltype=meshio_to_dolfin_type[cell_type],
        dim=str(dim)
        )
    vertices = ET.SubElement(mesh, 'vertices', size=str(len(points)))
    for k, point in enumerate(points):
        ET.SubElement(
            vertices,
            'vertex',
            index=str(k),
            x=repr(point[0]),
            y=repr(point[1]),
            z=repr(point[2])
            )

    num_cells = 0
    for cls in stripped_cells.values():
        num_cells += len(cls)

    xcells = ET.SubElement(mesh, 'cells', size=str(num_cells))
    idx = 0
    for ct, cls in stripped_cells.items():
        for cell in cls:
            cell_entry = ET.SubElement(
                xcells,
                meshio_to_dolfin_type[ct],
                index=str(idx)
                )
            for k, c in enumerate(cell):
                cell_entry.attrib['v{}'.format(k)] = str(c)
            idx += 1

    tree = ET.ElementTree(dolfin)
    tree.write(filename)
    return


def _numpy_type_to_dolfin_type(dtype):
    types = ['int', 'uint', 'float']
    for t in types:
        # issubtype handles all of int8, int16, float64 etc.
        if numpy.issubdtype(dtype, numpy.dtype(t)):
            return t
    return None


def _write_cell_data(
        filename,
        dim,
        cell_data
        ):
    dolfin = ET.Element(
        'dolfin',
        nsmap={'dolfin': 'https://fenicsproject.org/'}
        )

    mesh_function = ET.SubElement(
        dolfin,
        'mesh_function',
        type=_numpy_type_to_dolfin_type(cell_data.dtype),
        dim=str(dim),
        size=str(len(cell_data))
        )

    for k, value in enumerate(cell_data):
        ET.SubElement(
            mesh_function,
            'entity',
            index=str(k),
            value=repr(value),
            )

    tree = ET.ElementTree(dolfin)
    tree.write(filename)
    return


def write(
        filename,
        points,
        cells,
        point_data=None,
        cell_data=None,
        field_data=None
        ):
    logging.warning(
        'Dolfin\'s XML is a legacy format. Consider using XDMF instead.'
        )
    point_data = {} if point_data is None else point_data
    cell_data = {} if cell_data is None else cell_data
    field_data = {} if field_data is None else field_data

    if 'tetra' in cells:
        cell_type = 'tetra'
    else:
        assert 'triangle' in cells
        cell_type = 'triangle'

    _write_mesh(filename, points, cell_type, cells)

    if cell_type in cell_data:
        for key, data in cell_data[cell_type].items():
            cell_data_filename = \
                '{}_{}.xml'.format(os.path.splitext(filename)[0], key)
            dim = 2 if all(points[:, 2] == 0) else 3
            _write_cell_data(cell_data_filename, dim, numpy.array(data))
    return
meshio-1.11.7/meshio/exodus_io.py
# -*- coding: utf-8 -*-
#
'''
I/O for Exodus II.

See the EXODUS II manual, in particular Appendix A (page 171,
Implementation of EXODUS II with netCDF).
.. moduleauthor:: Nico Schlömer <nico.schloemer@gmail.com>
'''
import datetime

import numpy

from .__about__ import __version__


exodus_to_meshio_type = {
    # curves
    'BEAM': 'line',
    'BEAM2': 'line',
    'BEAM3': 'line3',
    'BAR2': 'line',
    # surfaces
    'SHELL': 'quad',
    'SHELL4': 'quad',
    'SHELL8': 'quad8',
    'SHELL9': 'quad9',
    'QUAD': 'quad',
    'QUAD4': 'quad',
    'QUAD5': 'quad5',
    'QUAD8': 'quad8',
    'QUAD9': 'quad9',
    #
    'TRIANGLE': 'triangle',
    # 'TRI': 'triangle',
    'TRI3': 'triangle',
    'TRI7': 'triangle7',
    # 'TRISHELL': 'triangle',
    # 'TRISHELL3': 'triangle',
    # 'TRISHELL7': 'triangle',
    #
    'TRI6': 'triangle6',
    # 'TRISHELL6': 'triangle6',
    # volumes
    'HEX': 'hexahedron',
    'HEXAHEDRON': 'hexahedron',
    'HEX8': 'hexahedron',
    'HEX9': 'hexahedron9',
    'HEX20': 'hexahedron20',
    'HEX27': 'hexahedron27',
    #
    'TETRA': 'tetra',
    'TETRA4': 'tetra4',
    'TETRA8': 'tetra8',
    'TETRA10': 'tetra10',
    'TETRA14': 'tetra14',
    #
    'PYRAMID': 'pyramid',
    'WEDGE': 'wedge'
    }

meshio_to_exodus_type = {v: k for k, v in exodus_to_meshio_type.items()}


def read(filename):
    import netCDF4

    nc = netCDF4.Dataset(filename)

    # assert nc.version == numpy.float32(5.1)
    # assert nc.api_version == numpy.float32(5.1)
    # assert nc.floating_point_word_size == 8

    # assert b''.join(nc.variables['coor_names'][0]) == b'X'
    # assert b''.join(nc.variables['coor_names'][1]) == b'Y'
    # assert b''.join(nc.variables['coor_names'][2]) == b'Z'

    points = numpy.zeros((len(nc.dimensions['num_nodes']), 3))
    point_data_names = []
    pd = []
    cells = {}
    for key, value in nc.variables.items():
        if key[:7] == 'connect':
            meshio_type = exodus_to_meshio_type[value.elem_type.upper()]
            if meshio_type in cells:
                cells[meshio_type] = \
                    numpy.vstack([cells[meshio_type], value[:] - 1])
            else:
                cells[meshio_type] = value[:] - 1
        elif key == 'coord':
            points = nc.variables['coord'][:].T
        elif key == 'coordx':
            points[:, 0] = value[:]
        elif key == 'coordy':
            points[:, 1] = value[:]
        elif key == 'coordz':
            points[:, 2] = value[:]
        elif key == 'name_nod_var':
            value.set_auto_mask(False)
            point_data_names = [b''.join(c).decode('UTF-8') for c in value[:]]
        elif key == 'vals_nod_var':
            pd = value[0, :]

    point_data = {name: dat for name, dat in zip(point_data_names, pd)}

    nc.close()
    return points, cells, point_data, {}, {}


numpy_to_exodus_dtype = {
    numpy.dtype(numpy.float32): 'f4',
    numpy.dtype(numpy.float64): 'f8',
    numpy.dtype(numpy.int8): 'i1',
    numpy.dtype(numpy.int16): 'i2',
    numpy.dtype(numpy.int32): 'i4',
    numpy.dtype(numpy.int64): 'i8',
    numpy.dtype(numpy.uint8): 'u1',
    numpy.dtype(numpy.uint16): 'u2',
    numpy.dtype(numpy.uint32): 'u4',
    numpy.dtype(numpy.uint64): 'u8',
    }


def write(filename,
          points,
          cells,
          point_data=None,
          cell_data=None,
          field_data=None
          ):
    import netCDF4

    point_data = {} if point_data is None else point_data
    cell_data = {} if cell_data is None else cell_data
    field_data = {} if field_data is None else field_data

    rootgrp = netCDF4.Dataset(filename, 'w')

    # set global data
    rootgrp.title = \
        'Created by meshio v{}, {}'.format(
            __version__, datetime.datetime.now().isoformat()
            )
    rootgrp.version = numpy.float32(5.1)
    rootgrp.api_version = numpy.float32(5.1)
    rootgrp.floating_point_word_size = 8

    # set dimensions
    total_num_elems = sum([v.shape[0] for v in cells.values()])
    rootgrp.createDimension('num_nodes', len(points))
    rootgrp.createDimension('num_dim', 3)
    rootgrp.createDimension('num_elem', total_num_elems)
    rootgrp.createDimension('num_el_blk', len(cells))
    rootgrp.createDimension('len_string', 33)
    rootgrp.createDimension('len_line', 81)
    rootgrp.createDimension('four', 4)
    rootgrp.createDimension('time_step', None)

    # dummy time step
    data = rootgrp.createVariable('time_whole', 'f4', 'time_step')
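    # (meshio data carries no time dependence, but Exodus stores point data
    # per time step; a single artificial step at t = 0.0 is enough, and the
    # point data below is written at exactly this step, cf. node_data[0, k].)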
    data[:] = 0.0

    # points
    coor_names = rootgrp.createVariable(
        'coor_names', 'S1', ('num_dim', 'len_string'),
        )
    coor_names.set_auto_mask(False)
    coor_names[0, 0] = 'X'
    coor_names[1, 0] = 'Y'
    coor_names[2, 0] = 'Z'
    data = rootgrp.createVariable(
        'coord',
        numpy_to_exodus_dtype[points.dtype],
        ('num_dim', 'num_nodes')
        )
    data[:] = points.T

    # cells
    # ParaView needs eb_prop1 -- some ID. The values don't seem to matter as
    # long as they are different for the different blocks.
    data = rootgrp.createVariable('eb_prop1', 'i4', 'num_el_blk')
    for k in range(len(cells)):
        data[k] = k
    for k, (key, values) in enumerate(cells.items()):
        dim1 = 'num_el_in_blk{}'.format(k+1)
        dim2 = 'num_nod_per_el{}'.format(k+1)
        rootgrp.createDimension(dim1, values.shape[0])
        rootgrp.createDimension(dim2, values.shape[1])
        dtype = numpy_to_exodus_dtype[values.dtype]
        data = rootgrp.createVariable(
            'connect{}'.format(k+1), dtype, (dim1, dim2)
            )
        data.elem_type = meshio_to_exodus_type[key]
        # Exodus is 1-based
        data[:] = values + 1

    # point data
    # The variable `name_nod_var` holds the names and indices of the node
    # variables, the variable `vals_nod_var` holds the actual data.
    num_nod_var = len(point_data)
    if num_nod_var > 0:
        rootgrp.createDimension('num_nod_var', num_nod_var)
        # set names
        point_data_names = rootgrp.createVariable(
            'name_nod_var', 'S1', ('num_nod_var', 'len_string')
            )
        point_data_names.set_auto_mask(False)
        for k, name in enumerate(point_data.keys()):
            for i, letter in enumerate(name):
                point_data_names[k, i] = letter.encode('utf-8')

        # Set data.
        # Deliberately take the dtype from the first data block.
        first_key = list(point_data.keys())[0]
        dtype = numpy_to_exodus_dtype[point_data[first_key].dtype]
        node_data = rootgrp.createVariable(
            'vals_nod_var',
            dtype,
            ('time_step', 'num_nod_var', 'num_nodes')
            )
        for k, (name, data) in enumerate(point_data.items()):
            node_data[0, k] = data

    rootgrp.close()
    return
meshio-1.11.7/meshio/gmsh_io.py
# -*- coding: utf-8 -*-
#
'''
I/O for Gmsh's msh format, cf.
<http://gmsh.info//doc/texinfo/gmsh.html#MSH-ASCII-file-format>.

.. moduleauthor:: Nico Schlömer <nico.schloemer@gmail.com>
'''
import logging
import struct

import numpy

from .vtk_io import raw_from_cell_data


num_nodes_per_cell = {
    'vertex': 1,
    'line': 2,
    'triangle': 3,
    'quad': 4,
    'quad8': 8,
    'tetra': 4,
    'hexahedron': 8,
    'hexahedron20': 20,
    'wedge': 6,
    'pyramid': 5,
    #
    'line3': 3,
    'triangle6': 6,
    'quad9': 9,
    'tetra10': 10,
    'hexahedron27': 27,
    'prism18': 18,
    'pyramid14': 14,
    #
    'line4': 4,
    'triangle10': 10,
    'quad16': 16,
    'tetra20': 20,
    'hexahedron64': 64,
    #
    'line5': 5,
    'triangle15': 15,
    'quad25': 25,
    'tetra35': 35,
    'hexahedron125': 125,
    #
    'line6': 6,
    'triangle21': 21,
    'quad36': 36,
    'tetra56': 56,
    'hexahedron216': 216,
    }

# Translate meshio types to gmsh codes
# http://gmsh.info//doc/texinfo/gmsh.html#MSH-ASCII-file-format
_gmsh_to_meshio_type = {
    1: 'line',
    2: 'triangle',
    3: 'quad',
    4: 'tetra',
    5: 'hexahedron',
    6: 'wedge',
    7: 'pyramid',
    8: 'line3',
    9: 'triangle6',
    10: 'quad9',
    11: 'tetra10',
    12: 'hexahedron27',
    13: 'prism18',
    14: 'pyramid14',
    15: 'vertex',
    16: 'quad8',
    17: 'hexahedron20',
    21: 'triangle10',
    23: 'triangle15',
    25: 'triangle21',
    26: 'line4',
    27: 'line5',
    28: 'line6',
    29: 'tetra20',
    30: 'tetra35',
    31: 'tetra56',
    36: 'quad16',
    37: 'quad25',
    38: 'quad36',
    92: 'hexahedron64',
    93: 'hexahedron125',
    94: 'hexahedron216',
    }

_meshio_to_gmsh_type = {v: k for k, v in _gmsh_to_meshio_type.items()}


def read(filename):
    '''Reads a Gmsh msh file.
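
    Both the ASCII and the binary flavor of the MSH 2 format are handled;
    cf. _read_header below.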
    '''
    with open(filename, 'rb') as f:
        out = read_buffer(f)
    return out


def _read_header(f, int_size):
    line = f.readline().decode('utf-8')
    # Split the line
    #   2.2 0 8
    # into its components.
    str_list = list(filter(None, line.split()))
    assert str_list[0][0] == '2', 'Need mesh format 2'
    assert str_list[1] in ['0', '1']
    is_ascii = str_list[1] == '0'
    data_size = int(str_list[2])
    if not is_ascii:
        # The next line is the integer 1 in bytes. Useful for checking
        # endianness. Just assert that we get 1 here.
        one = f.read(int_size)
        assert struct.unpack('i', one)[0] == 1
        line = f.readline().decode('utf-8')
        assert line == '\n'
    line = f.readline().decode('utf-8')
    assert line.strip() == '$EndMeshFormat'
    return data_size, is_ascii


def _read_physical_names(f, field_data):
    line = f.readline().decode('utf-8')
    num_phys_names = int(line)
    for _ in range(num_phys_names):
        line = f.readline().decode('utf-8')
        key = line.split(' ')[2].replace('"', '').replace('\n', '')
        phys_group = int(line.split(' ')[1])
        phys_dim = int(line.split(' ')[0])
        value = numpy.array([phys_group, phys_dim], dtype=int)
        field_data[key] = value
    line = f.readline().decode('utf-8')
    assert line.strip() == '$EndPhysicalNames'
    return


def _read_nodes(f, is_ascii, int_size, data_size):
    # The first line is the number of nodes
    line = f.readline().decode('utf-8')
    num_nodes = int(line)
    if is_ascii:
        points = numpy.fromfile(
            f, count=num_nodes*4, sep=' '
            ).reshape((num_nodes, 4))
        # The first number is the index
        points = points[:, 1:]
    else:
        # binary
        num_bytes = num_nodes * (int_size + 3 * data_size)
        assert numpy.int32(0).nbytes == int_size
        assert numpy.float64(0.0).nbytes == data_size
        dtype = [('index', numpy.int32), ('x', numpy.float64, (3,))]
        data = numpy.fromstring(f.read(num_bytes), dtype=dtype)
        assert (data['index'] == range(1, num_nodes+1)).all()
        points = numpy.ascontiguousarray(data['x'])
        line = f.readline().decode('utf-8')
        assert line == '\n'

    line = f.readline().decode('utf-8')
    assert line.strip() == '$EndNodes'
    return points


def _read_cells(f, cells, int_size, is_ascii):
    # The first line is the number of elements
    line = f.readline().decode('utf-8')
    total_num_cells = int(line)
    has_additional_tag_data = False
    cell_tags = {}
    if is_ascii:
        for _ in range(total_num_cells):
            line = f.readline().decode('utf-8')
            data = [int(k) for k in filter(None, line.split())]
            t = _gmsh_to_meshio_type[data[1]]
            num_nodes_per_elem = num_nodes_per_cell[t]

            if t not in cells:
                cells[t] = []
            cells[t].append(data[-num_nodes_per_elem:])

            # data[2] gives the number of tags. The gmsh manual
            # <http://gmsh.info//doc/texinfo/gmsh.html#MSH-ASCII-file-format>
            # says:
            # >>>
            # By default, the first tag is the number of the physical entity to
            # which the element belongs; the second is the number of the
            # elementary geometrical entity to which the element belongs; the
            # third is the number of mesh partitions to which the element
            # belongs, followed by the partition ids (negative partition ids
            # indicate ghost cells). A zero tag is equivalent to no tag. Gmsh
            # and most codes using the MSH 2 format require at least the first
            # two tags (physical and elementary tags).
            # <<<
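            # For example, the hypothetical line
            #   4 2 2 1 7 1 2 3
            # describes element number 4 of type 2 (triangle) carrying two
            # tags (physical group 1, geometrical entity 7), with nodes 1 2 3.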
            num_tags = data[2]
            if t not in cell_tags:
                cell_tags[t] = []
            cell_tags[t].append(data[3:3+num_tags])

        # convert to numpy arrays
        for key in cells:
            cells[key] = numpy.array(cells[key], dtype=int)
        for key in cell_tags:
            cell_tags[key] = numpy.array(cell_tags[key], dtype=int)
    else:
        # binary
        num_elems = 0
        while num_elems < total_num_cells:
            # read element header
            elem_type = struct.unpack('i', f.read(int_size))[0]
            t = _gmsh_to_meshio_type[elem_type]
            num_nodes_per_elem = num_nodes_per_cell[t]
            num_elems0 = struct.unpack('i', f.read(int_size))[0]
            num_tags = struct.unpack('i', f.read(int_size))[0]
            # assert num_tags >= 2

            # read element data
            num_bytes = 4 * (
                num_elems0 * (1 + num_tags + num_nodes_per_elem)
                )
            shape = \
                (num_elems0, 1 + num_tags + num_nodes_per_elem)
            b = f.read(num_bytes)
            data = numpy.fromstring(
                b, dtype=numpy.int32
                ).reshape(shape)

            if t not in cells:
                cells[t] = []
            cells[t].append(data[:, -num_nodes_per_elem:])

            if t not in cell_tags:
                cell_tags[t] = []
            cell_tags[t].append(data[:, 1:num_tags+1])

            num_elems += num_elems0

        # collect cells
        for key in cells:
            cells[key] = numpy.vstack(cells[key])

        # collect cell tags
        for key in cell_tags:
            cell_tags[key] = numpy.vstack(cell_tags[key])

        line = f.readline().decode('utf-8')
        assert line == '\n'

    line = f.readline().decode('utf-8')
    assert line.strip() == '$EndElements'

    # Subtract one to account for the fact that python indices are
    # 0-based.
    for key in cells:
        cells[key] -= 1

    # restrict to the standard two data items (physical, geometrical)
    output_cell_tags = {}
    for key in cell_tags:
        if cell_tags[key].shape[1] > 2:
            has_additional_tag_data = True
        output_cell_tags[key] = {}
        if cell_tags[key].shape[1] > 0:
            output_cell_tags[key]['gmsh:physical'] = cell_tags[key][:, 0]
        if cell_tags[key].shape[1] > 1:
            output_cell_tags[key]['gmsh:geometrical'] = cell_tags[key][:, 1]

    return has_additional_tag_data, output_cell_tags


def _read_data(f, tag, data_dict, int_size, data_size, is_ascii):
    # Read string tags
    num_string_tags = int(f.readline().decode('utf-8'))
    string_tags = [
        f.readline().decode('utf-8').strip()
        for _ in range(num_string_tags)
        ]
    # The real tags typically only contain one value, the time.
    # Discard it.
    num_real_tags = int(f.readline().decode('utf-8'))
    for _ in range(num_real_tags):
        f.readline()
    num_integer_tags = int(f.readline().decode('utf-8'))
    integer_tags = [
        int(f.readline().decode('utf-8'))
        for _ in range(num_integer_tags)
        ]
    num_components = integer_tags[1]
    num_items = integer_tags[2]
    if is_ascii:
        data = numpy.fromfile(
            f, count=num_items*(1+num_components), sep=' '
            ).reshape((num_items, 1+num_components))
        # The first number is the index
        data = data[:, 1:]
    else:
        # binary
        num_bytes = num_items * (int_size + num_components * data_size)
        assert numpy.int32(0).nbytes == int_size
        assert numpy.float64(0.0).nbytes == data_size
        dtype = [
            ('index', numpy.int32),
            ('values', numpy.float64, (num_components,))
            ]
        data = numpy.fromstring(f.read(num_bytes), dtype=dtype)
        assert (data['index'] == range(1, num_items+1)).all()
        data = numpy.ascontiguousarray(data['values'])
        line = f.readline().decode('utf-8')
        assert line == '\n'

    line = f.readline().decode('utf-8')
    assert line.strip() == '$End{}'.format(tag)

    # The gmsh format cannot distinguish between data of shape (n,) and (n, 1).
    # If shape[1] == 1, cut it off.
    if data.shape[1] == 1:
        data = data[:, 0]

    data_dict[string_tags[0]] = data
    return


def read_buffer(f):
    # The format is specified at
    # <http://gmsh.info//doc/texinfo/gmsh.html#MSH-ASCII-file-format>.
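    # A file is a sequence of environments,
    #   $MeshFormat ... $EndMeshFormat
    #   $PhysicalNames ... $EndPhysicalNames   (optional)
    #   $Nodes ... $EndNodes
    #   $Elements ... $EndElements
    #   $NodeData/$ElementData ...             (optional, repeatable)
    # which the loop below dispatches on.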
    # Initialize the optional data fields
    points = []
    cells = {}
    field_data = {}
    cell_data_raw = {}
    cell_tags = {}
    point_data = {}

    is_ascii = None
    int_size = 4
    data_size = None
    while True:
        line = f.readline().decode('utf-8')
        if not line:
            # EOF
            break
        assert line[0] == '$'
        environ = line[1:].strip()
        if environ == 'MeshFormat':
            data_size, is_ascii = _read_header(f, int_size)
        elif environ == 'PhysicalNames':
            _read_physical_names(f, field_data)
        elif environ == 'Nodes':
            points = _read_nodes(f, is_ascii, int_size, data_size)
        elif environ == 'Elements':
            has_additional_tag_data, cell_tags = \
                _read_cells(f, cells, int_size, is_ascii)
        elif environ == 'NodeData':
            _read_data(
                f, 'NodeData', point_data, int_size, data_size, is_ascii
                )
        else:
            assert environ == 'ElementData', \
                'Unknown environment \'{}\'.'.format(environ)
            _read_data(
                f, 'ElementData', cell_data_raw, int_size, data_size, is_ascii
                )

    if has_additional_tag_data:
        logging.warning(
            'The file contains tag data that couldn\'t be processed.'
            )

    cell_data = cell_data_from_raw(cells, cell_data_raw)

    # merge cell_tags into cell_data
    for key, tag_dict in cell_tags.items():
        if key not in cell_data:
            cell_data[key] = {}
        for name, item_list in tag_dict.items():
            assert name not in cell_data[key]
            cell_data[key][name] = item_list

    return points, cells, point_data, cell_data, field_data


def cell_data_from_raw(cells, cell_data_raw):
    cell_data = {k: {} for k in cells}
    for key in cell_data_raw:
        d = cell_data_raw[key]
        r = 0
        for k in cells:
            cell_data[k][key] = d[r:r+len(cells[k])]
            r += len(cells[k])

    return cell_data


def _write_physical_names(fh, field_data):
    # Write physical names
    entries = []
    for phys_name in field_data:
        try:
            phys_num, phys_dim = field_data[phys_name]
            phys_num, phys_dim = int(phys_num), int(phys_dim)
            entries.append((phys_dim, phys_num, phys_name))
        except (ValueError, TypeError):
            logging.warning(
                'Field data contains entry that cannot be processed.'
                )
    entries.sort()
    if entries:
        fh.write('$PhysicalNames\n'.encode('utf-8'))
        fh.write('{}\n'.format(len(entries)).encode('utf-8'))
        for entry in entries:
            fh.write('{} {} "{}"\n'.format(*entry).encode('utf-8'))
        fh.write('$EndPhysicalNames\n'.encode('utf-8'))
    return


def _write_nodes(fh, points, write_binary):
    fh.write('$Nodes\n'.encode('utf-8'))
    fh.write('{}\n'.format(len(points)).encode('utf-8'))
    if write_binary:
        dtype = [('index', numpy.int32), ('x', numpy.float64, (3,))]
        tmp = numpy.empty(len(points), dtype=dtype)
        tmp['index'] = 1 + numpy.arange(len(points))
        tmp['x'] = points
        fh.write(tmp.tostring())
        fh.write('\n'.encode('utf-8'))
    else:
        for k, x in enumerate(points):
            fh.write(
                '{} {!r} {!r} {!r}\n'.format(k+1, x[0], x[1], x[2])
                .encode('utf-8')
                )
    fh.write('$EndNodes\n'.encode('utf-8'))
    return


def _write_elements(fh, cells, tag_data, write_binary):
    # write elements
    fh.write('$Elements\n'.encode('utf-8'))
    # count all cells
    total_num_cells = sum([data.shape[0] for _, data in cells.items()])
    fh.write('{}\n'.format(total_num_cells).encode('utf-8'))

    consecutive_index = 0
    for cell_type, node_idcs in cells.items():
        tags = []
        for key in ['gmsh:physical', 'gmsh:geometrical']:
            try:
                tags.append(tag_data[cell_type][key])
            except KeyError:
                pass
        fcd = numpy.concatenate([tags]).T

        # pylint: disable=len-as-condition
        if len(fcd) == 0:
            fcd = numpy.empty((len(node_idcs), 0), dtype=numpy.int32)

        if write_binary:
            # header
            fh.write(struct.pack('i', _meshio_to_gmsh_type[cell_type]))
            fh.write(struct.pack('i', node_idcs.shape[0]))
            fh.write(struct.pack('i', fcd.shape[1]))
            # actual data
            a = numpy.arange(
                len(node_idcs), dtype=numpy.int32
                )[:, numpy.newaxis]
            a += 1 + consecutive_index
            array = numpy.hstack([a, fcd, node_idcs + 1])
            fh.write(array.tostring())
        else:
            form = '{} ' + str(_meshio_to_gmsh_type[cell_type]) \
                + ' ' + str(fcd.shape[1]) \
                + ' {} {}\n'
            for k, c in enumerate(node_idcs):
                fh.write(
                    form.format(
                        consecutive_index + k + 1,
                        ' '.join([str(val) for val in fcd[k]]),
                        ' '.join([str(cc + 1) for cc in c])
                        ).encode('utf-8')
                    )

        consecutive_index += len(node_idcs)
    if write_binary:
        fh.write('\n'.encode('utf-8'))
    fh.write('$EndElements\n'.encode('utf-8'))
    return


def _write_data(fh, tag, name, data, write_binary):
    fh.write('${}\n'.format(tag).encode('utf-8'))
    # <http://gmsh.info//doc/texinfo/gmsh.html#MSH-ASCII-file-format>:
    # > Number of string tags.
    # > gives the number of string tags that follow. By default the first
    # > string-tag is interpreted as the name of the post-processing view and
    # > the second as the name of the interpolation scheme. The interpolation
    # > scheme is provided in the $InterpolationScheme section (see below).
    fh.write('{}\n'.format(1).encode('utf-8'))
    fh.write('{}\n'.format(name).encode('utf-8'))
    fh.write('{}\n'.format(1).encode('utf-8'))
    fh.write('{}\n'.format(0.0).encode('utf-8'))
    # three integer tags:
    fh.write('{}\n'.format(3).encode('utf-8'))
    # time step
    fh.write('{}\n'.format(0).encode('utf-8'))
    # number of components
    num_components = data.shape[1] if len(data.shape) > 1 else 1
    assert num_components in [1, 3, 9], \
        'Gmsh only permits 1, 3, or 9 components per data field.'
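    # (In Gmsh's post-processing views, 1, 3, and 9 components correspond to
    # scalar, vector, and tensor fields, respectively.)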
    fh.write('{}\n'.format(num_components).encode('utf-8'))
    # num data items
    fh.write('{}\n'.format(data.shape[0]).encode('utf-8'))
    # actually write the data
    if write_binary:
        dtype = [
            ('index', numpy.int32),
            ('data', numpy.float64, num_components)
            ]
        tmp = numpy.empty(len(data), dtype=dtype)
        tmp['index'] = 1 + numpy.arange(len(data))
        tmp['data'] = data
        fh.write(tmp.tostring())
        fh.write('\n'.encode('utf-8'))
    else:
        fmt = ' '.join(['{}'] + ['{!r}'] * num_components) + '\n'
        # TODO unify
        if num_components == 1:
            for k, x in enumerate(data):
                fh.write(fmt.format(k+1, x).encode('utf-8'))
        else:
            for k, x in enumerate(data):
                fh.write(fmt.format(k+1, *x).encode('utf-8'))

    fh.write('$End{}\n'.format(tag).encode('utf-8'))
    return


def write(
        filename,
        points,
        cells,
        point_data=None,
        cell_data=None,
        field_data=None,
        write_binary=True,
        ):
    '''Writes msh files, cf.
    <http://gmsh.info//doc/texinfo/gmsh.html#MSH-ASCII-file-format>.
    '''
    point_data = {} if point_data is None else point_data
    cell_data = {} if cell_data is None else cell_data
    field_data = {} if field_data is None else field_data

    if write_binary:
        for key in cells:
            if cells[key].dtype != numpy.int32:
                logging.warning(
                    'Binary Gmsh needs 32-bit integers (got %s). Converting.',
                    cells[key].dtype
                    )
                cells[key] = numpy.array(cells[key], dtype=numpy.int32)

    with open(filename, 'wb') as fh:
        mode_idx = 1 if write_binary else 0
        size_of_double = 8
        fh.write((
            '$MeshFormat\n2.2 {} {}\n'.format(mode_idx, size_of_double)
            ).encode('utf-8'))
        if write_binary:
            fh.write(struct.pack('i', 1))
            fh.write('\n'.encode('utf-8'))
        fh.write('$EndMeshFormat\n'.encode('utf-8'))

        if field_data:
            _write_physical_names(fh, field_data)

        # Split the cell data: gmsh:physical and gmsh:geometrical are tags, the
        # rest is actual cell data.
        tag_data = {}
        other_data = {}
        for cell_type, a in cell_data.items():
            tag_data[cell_type] = {}
            other_data[cell_type] = {}
            for key, data in a.items():
                if key in ['gmsh:physical', 'gmsh:geometrical']:
                    tag_data[cell_type][key] = data
                else:
                    other_data[cell_type][key] = data

        _write_nodes(fh, points, write_binary)
        _write_elements(fh, cells, tag_data, write_binary)
        for name, dat in point_data.items():
            _write_data(fh, 'NodeData', name, dat, write_binary)
        cell_data_raw = raw_from_cell_data(other_data)
        for name, dat in cell_data_raw.items():
            _write_data(fh, 'ElementData', name, dat, write_binary)

    return
meshio-1.11.7/meshio/h5m_io.py
# -*- coding: utf-8 -*-
#
'''
I/O for h5m, cf.
<https://trac.mcs.anl.gov/projects/ITAPS/wiki/MOAB/h5m>.

.. moduleauthor:: Nico Schlömer <nico.schloemer@gmail.com>
'''
from datetime import datetime
import logging

import numpy

from . import __about__


# def _int_to_bool_list(num):
#     # From .
#     bin_string = format(num, '04b')
#     return [x == '1' for x in bin_string[::-1]]


def read(filename):
    '''Reads H5M files, cf.
    https://trac.mcs.anl.gov/projects/ITAPS/wiki/MOAB/h5m.
    '''
    import h5py

    f = h5py.File(filename, 'r')
    dset = f['tstt']

    points = dset['nodes']['coordinates'][()]

    # read point data
    point_data = {}
    if 'tags' in dset['nodes']:
        for name, dataset in dset['nodes']['tags'].items():
            point_data[name] = dataset[()]

    # # Assert that the GLOBAL_IDs are contiguous.
    # point_gids = dset['nodes']['tags']['GLOBAL_ID'][()]
    # point_start_gid = dset['nodes']['coordinates'].attrs['start_id']
    # point_end_gid = point_start_gid + len(point_gids) - 1
    # assert all(point_gids == range(point_start_gid, point_end_gid + 1))

    h5m_to_meshio_type = {
        'Edge2': 'line',
        'Tri3': 'triangle',
        'Tet4': 'tetra'
        }
    cells = {}
    cell_data = {}
    for h5m_type, data in dset['elements'].items():
        meshio_type = h5m_to_meshio_type[h5m_type]
        conn = data['connectivity']
        # Note that the indices are off by 1 in h5m.
        cells[meshio_type] = conn[()] - 1

        # TODO bring cell data back
        # if 'tags' in data:
        #     for name, dataset in data['tags'].items():
        #         cell_data[name] = dataset[()]

    # The `sets` in H5M are special in that they represent a segregation of
    # data in the current file, particularly by a load balancer (Metis,
    # Zoltan, etc.). This segregation has no equivalent in other data types,
    # but is certainly worthwhile visualizing.
    # Hence, we will translate the sets into cell data with the prefix
    # "set::" here.
    field_data = {}
    # TODO deal with sets
    # if 'sets' in dset and 'contents' in dset['sets']:
    #     # read sets
    #     sets_contents = dset['sets']['contents'][()]
    #     sets_list = dset['sets']['list'][()]
    #     sets_tags = dset['sets']['tags']

    #     cell_start_gid = conn.attrs['start_id']
    #     cell_gids = cell_start_gid + elems['tags']['GLOBAL_ID'][()]
    #     cell_end_gid = cell_start_gid + len(cell_gids) - 1
    #     assert all(cell_gids == range(cell_start_gid, cell_end_gid + 1))

    #     # create the sets
    #     for key, value in sets_tags.items():
    #         mod_key = 'set::' + key
    #         cell_data[mod_key] = numpy.empty(len(cells), dtype=int)
    #         end = 0
    #         for k, row in enumerate(sets_list):
    #             bits = _int_to_bool_list(row[3])
    #             # is_owner = bits[0]
    #             # is_unique = bits[1]
    #             # is_ordered = bits[2]
    #             is_range_compressed = bits[3]
    #             if is_range_compressed:
    #                 start_gids = sets_contents[end:row[0]+1:2]
    #                 lengths = sets_contents[end+1:row[0]+1:2]
    #                 for start_gid, length in zip(start_gids, lengths):
    #                     end_gid = start_gid + length - 1
    #                     if start_gid >= cell_start_gid and \
    #                             end_gid <= cell_end_gid:
    #                         i0 = start_gid - cell_start_gid
    #                         i1 = end_gid - cell_start_gid + 1
    #                         cell_data[mod_key][i0:i1] = value[k]
    #                     else:
    #                         # TODO deal with point data
    #                         raise RuntimeError('')
    #             else:
    #                 gids = sets_contents[end:row[0]+1]
    #                 cell_data[mod_key][gids - cell_start_gid] = value[k]
    #             end = row[0] + 1

    return points, cells, point_data, cell_data, field_data


def write(
        filename,
        points,
        cells,
        point_data=None,
        cell_data=None,
        field_data=None,
        add_global_ids=True
        ):
    '''Writes H5M files, cf.
    https://trac.mcs.anl.gov/projects/ITAPS/wiki/MOAB/h5m.
    '''
    import h5py

    point_data = {} if point_data is None else point_data
    cell_data = {} if cell_data is None else cell_data
    field_data = {} if field_data is None else field_data

    f = h5py.File(filename, 'w')

    tstt = f.create_group('tstt')

    # The base index for h5m is 1.
    global_id = 1

    # add nodes
    nodes = tstt.create_group('nodes')
    coords = nodes.create_dataset('coordinates', data=points)
    coords.attrs.create('start_id', global_id)
    global_id += len(points)

    # Global tags
    tstt_tags = tstt.create_group('tags')

    # The GLOBAL_ID associated with a point is used to identify points if
    # distributed across several processes. mbpart automatically adds them,
    # too.
    if 'GLOBAL_ID' not in point_data and add_global_ids:
        point_data['GLOBAL_ID'] = numpy.arange(1, len(points)+1)

    # add point data
    if point_data is not None:
        tags = nodes.create_group('tags')
        for key, data in point_data.items():
            if len(data.shape) == 1:
                dtype = data.dtype
                tags.create_dataset(key, data=data)
            else:
                # H5M doesn't accept n-x-k arrays as data; it wants an n-x-1
                # array with k-tuples as entries.
                n, k = data.shape
                dtype = numpy.dtype((data.dtype, (k,)))
                dset = tags.create_dataset(key, (n,), dtype=dtype)
                dset[:] = data

            # Create entry in global tags
            g = tstt_tags.create_group(key)
            g['type'] = dtype
            # Add a class tag:
            # From MOAB's mhdf.h:
            # ```
            # /** \brief Was dense tag data in mesh database */
            # #define mhdf_DENSE_TYPE 2
            # /** \brief Was sparse tag data in mesh database */
            # #define mhdf_SPARSE_TYPE 1
            # /** \brief Was bit-field tag data in mesh database */
            # #define mhdf_BIT_TYPE 0
            # /** \brief Unused */
            # #define mhdf_MESH_TYPE 3
            # ```
            g.attrs['class'] = 2

    # add elements
    elements = tstt.create_group('elements')

    elem_dt = h5py.special_dtype(
        enum=('i', {
            'Edge': 1,
            'Tri': 2,
            'Quad': 3,
            'Polygon': 4,
            'Tet': 5,
            'Pyramid': 6,
            'Prism': 7,
            'Knife': 8,
            'Hex': 9,
            'Polyhedron': 10
            })
        )

    tstt['elemtypes'] = elem_dt

    tstt.create_dataset(
        'history',
        data=[
            __name__.encode('utf-8'),
            __about__.__version__.encode('utf-8'),
            str(datetime.now()).encode('utf-8')
            ]
        )

    # number of nodes to h5m name, element type
    meshio_to_h5m_type = {
        'line': {'name': 'Edge2', 'type': 1},
        'triangle': {'name': 'Tri3', 'type': 2},
        'tetra': {'name': 'Tet4', 'type': 5}
        }
    for key, data in cells.items():
        if key not in meshio_to_h5m_type:
            logging.warning(
                'Unsupported H5M element type \'%s\'. Skipping.', key
                )
            continue
        this_type = meshio_to_h5m_type[key]
        elem_group = elements.create_group(this_type['name'])
        elem_group.attrs.create(
            'element_type', this_type['type'], dtype=elem_dt
            )
        # h5m node indices are 1-based
        conn = elem_group.create_dataset('connectivity', data=(data + 1))
        conn.attrs.create('start_id', global_id)
        global_id += len(data)

    # add cell data
    if cell_data:
        tags = elem_group.create_group('tags')
        for key, value in cell_data.items():
            tags.create_dataset(key, data=value)

    # add empty set -- MOAB wants this
    sets = tstt.create_group('sets')
    sets.create_group('tags')

    # set max_id
    tstt.attrs.create('max_id', global_id, dtype='u8')

    return
meshio-1.11.7/meshio/helpers.py
# -*- coding: utf-8 -*-
#
import os

from . import ansys_io
from . import dolfin_io
from . import exodus_io
from . import h5m_io
from . import med_io
from . import medit_io
from . import gmsh_io
from . import off_io
from . import permas_io
from . import stl_io
from . import vtk_io
from . import vtu_io
from . import xdmf_io


input_filetypes = [
    'ansys',
    'exodus',
    'gmsh-ascii',
    'gmsh-binary',
    'dolfin-xml',
    'med',
    'medit',
    'permas',
    'moab',
    'off',
    'stl-ascii',
    'stl-binary',
    'vtk-ascii',
    'vtk-binary',
    'vtu-ascii',
    'vtu-binary',
    'xdmf',
    ]

output_filetypes = [
    'ansys-ascii',
    'ansys-binary',
    'exodus',
    'gmsh-ascii',
    'gmsh-binary',
    'dolfin-xml',
    'med',
    'medit',
    'permas',
    'moab',
    'off',
    'stl-ascii',
    'stl-binary',
    'vtk-ascii',
    'vtk-binary',
    'vtu-ascii',
    'vtu-binary',
    'xdmf',
    ]

_extension_to_filetype = {
    '.e': 'exodus',
    '.ex2': 'exodus',
    '.exo': 'exodus',
    '.med': 'med',
    '.mesh': 'medit',
    '.msh': 'gmsh-binary',
    '.xml': 'dolfin-xml',
    '.post': 'permas',
    '.post.gz': 'permas',
    '.dato': 'permas',
    '.dato.gz': 'permas',
    '.h5m': 'moab',
    '.off': 'off',
    '.stl': 'stl-binary',
    '.vtu': 'vtu-binary',
    '.vtk': 'vtk-binary',
    '.xdmf': 'xdmf',
    '.xmf': 'xdmf',
    }


def read(filename, file_format=None):
    '''Reads an unstructured mesh with added data.

    :param filename: The file to read from.
    :type filename: str
    :returns mesh{2,3}d: The mesh data.
    :returns point_data: Point data read from file.
    :type point_data: dict
    :returns field_data: Field data read from file.
    :type field_data: dict
    '''
    # https://stackoverflow.com/q/4843173/353337
    assert isinstance(filename, str)

    if not file_format:
        # deduce the file format from the extension
        extension = '.' + filename.split(os.extsep, 1)[-1]
        file_format = _extension_to_filetype[extension]

    format_to_reader = {
        'ansys': ansys_io,
        'ansys-ascii': ansys_io,
        'ansys-binary': ansys_io,
        #
        'gmsh': gmsh_io,
        'gmsh-ascii': gmsh_io,
        'gmsh-binary': gmsh_io,
        #
        'med': med_io,
        'medit': medit_io,
        'dolfin-xml': dolfin_io,
        'permas': permas_io,
        'moab': h5m_io,
        'off': off_io,
        #
        'stl': stl_io,
        'stl-ascii': stl_io,
        'stl-binary': stl_io,
        #
        'vtu-ascii': vtu_io,
        'vtu-binary': vtu_io,
        #
        'vtk-ascii': vtk_io,
        'vtk-binary': vtk_io,
        #
        'xdmf': xdmf_io,
        'exodus': exodus_io,
        }

    return format_to_reader[file_format].read(filename)


def write(filename,
          points,
          cells,
          point_data=None,
          cell_data=None,
          field_data=None,
          file_format=None
          ):
    '''Writes mesh together with data to a file.

    :params filename: File to write to.
    :type filename: str

    :params point_data: Named additional point data to write to the file.
    :type point_data: dict
    '''
    point_data = {} if point_data is None else point_data
    cell_data = {} if cell_data is None else cell_data
    field_data = {} if field_data is None else field_data

    if not file_format:
        # deduce the file format from the extension
        # _, extension = os.path.splitext(filename)
        extension = '.' + filename.split(os.extsep, 1)[-1]
        file_format = _extension_to_filetype[extension]

    # check cells for sanity
    for key in cells:
        assert cells[key].shape[1] == gmsh_io.num_nodes_per_cell[key]

    if file_format == 'moab':
        h5m_io.write(
            filename, points, cells,
            point_data=point_data,
            cell_data=cell_data,
            field_data=field_data
            )
    elif file_format in ['ansys-ascii', 'ansys-binary']:
        ansys_io.write(
            filename, points, cells,
            point_data=point_data,
            cell_data=cell_data,
            write_binary=(file_format == 'ansys-binary')
            )
    elif file_format in ['gmsh-ascii', 'gmsh-binary']:
        gmsh_io.write(
            filename, points, cells,
            point_data=point_data,
            cell_data=cell_data,
            field_data=field_data,
            write_binary=(file_format == 'gmsh-binary')
            )
    elif file_format == 'med':
        med_io.write(
            filename, points, cells,
            point_data=point_data,
            cell_data=cell_data)
    elif file_format == 'medit':
        medit_io.write(filename, points, cells)
    elif file_format == 'dolfin-xml':
        dolfin_io.write(filename, points, cells, cell_data=cell_data)
    elif file_format == 'off':
        off_io.write(filename, points, cells)
    elif file_format == 'permas':
        permas_io.write(filename, points, cells)
    elif file_format in ['stl-ascii', 'stl-binary']:
        stl_io.write(
            filename, points, cells,
            point_data=point_data,
            cell_data=cell_data,
            field_data=field_data,
            write_binary=(file_format != 'stl-ascii')
            )
    elif file_format == 'vtu-ascii':
        vtu_io.write(
            filename, points, cells,
            point_data=point_data,
            cell_data=cell_data,
            field_data=field_data,
            write_binary=False
            )
    elif file_format in ['vtu', 'vtu-binary']:
        vtu_io.write(
            filename, points, cells,
            point_data=point_data,
            cell_data=cell_data,
            field_data=field_data,
            write_binary=True
            )
    elif file_format == 'vtk-ascii':
        vtk_io.write(
            filename, points, cells,
            point_data=point_data,
            cell_data=cell_data,
            field_data=field_data,
            write_binary=False
            )
    elif file_format in ['vtk', 'vtk-binary']:
        vtk_io.write(
            filename, points, cells,
            point_data=point_data,
            cell_data=cell_data,
            field_data=field_data,
            write_binary=True
            )
    elif file_format in ['xdmf', 'xdmf3']:
        # XDMF
        xdmf_io.write(
            filename, points, cells,
            point_data=point_data,
            cell_data=cell_data,
            field_data=field_data
            )
    else:
        assert file_format == 'exodus', (
            'Unknown file format \'{}\' of \'{}\'.'
            .format(file_format, filename)
            )
        exodus_io.write(
            filename, points, cells,
            point_data=point_data,
            cell_data=cell_data,
            field_data=field_data
            )
    return
meshio-1.11.7/meshio/med_io.py
# -*- coding: utf-8 -*-
#
'''
I/O for MED/Salome, cf.
<http://docs.salome-platform.org/latest/dev/MEDCoupling/med-file.html>.

.. moduleauthor:: Nico Schlömer <nico.schloemer@gmail.com>
.. moduleauthor:: Tianyi Li
'''
import numpy

meshio_to_med_type = {
    'triangle': 'TR3',
    'tetra': 'TE4',
    'hexahedron': 'HE8',
    'quad': 'QU4',
    'vertex': 'PO1',
    'line': 'SE2',
    }
med_to_meshio_type = {v: k for k, v in meshio_to_med_type.items()}


def read(filename):
    import h5py

    f = h5py.File(filename, 'r')

    # Mesh ensemble
    ens_maa = f['ENS_MAA']
    meshes = ens_maa.keys()
    assert len(meshes) == 1, 'Must only contain exactly 1 mesh'
    mesh = ens_maa[list(meshes)[0]]

    # Possible time-stepping
    if 'NOE' not in mesh:
        # One needs NOE (node) and MAI (French maillage, meshing) data. If
        # they are not available in the mesh, check for time-steppings.
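        # That is, the hierarchy is either ENS_MAA/<mesh>/{NOE,MAI} directly,
        # or ENS_MAA/<mesh>/<step>/{NOE,MAI} with exactly one step group.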
ts = mesh.keys() assert len(ts) == 1, 'Must only contain exactly 1 time-step' mesh = mesh[list(ts)[0]] # Points pts_dataset = mesh['NOE']['COO'] number = pts_dataset.attrs['NBR'] points = pts_dataset[()].reshape(-1, number).T if points.shape[1] == 2: points = numpy.column_stack([points, numpy.zeros(len(points))]) # Cells cells = {} mai = mesh['MAI'] for key, med_type in meshio_to_med_type.items(): if med_type in mai: nn = int(med_type[-1]) cells[key] = mai[med_type]['NOD'][()].reshape(nn, -1).T - 1 # Read nodal and cell data if they exist try: cha = f['CHA'] # champs (fields) in french point_data, cell_data, field_data = _read_data(cha) except KeyError: point_data, cell_data, field_data = {}, {}, {} return points, cells, point_data, cell_data, field_data def _read_data(cha): point_data = {} cell_data = {} field_data = {} for name, data in cha.items(): ts = data.keys() data = data[list(ts)[-1]] # only read the last time-step # MED field can contain multiple types of data for supp in data: if supp == 'NOE': # continuous nodal (NOEU) data point_data[name] = _read_nodal_data(data) else: # Gauss points (ELGA) or DG (ELNO) data cell_data = _read_cell_data(cell_data, name, supp, data) return point_data, cell_data, field_data def _read_nodal_data(data): nodal_dataset = data['NOE'][data['NOE'].attrs['PFL']] nbr = nodal_dataset.attrs['NBR'] values = nodal_dataset['CO'][()].reshape(-1, nbr).T if values.shape[1] == 1: # cut off for scalars values = values[:, 0] return values def _read_cell_data(cell_data, name, supp, data): med_type = supp.partition('.')[2] cell_dataset = data[supp][data[supp].attrs['PFL']] nbr = cell_dataset.attrs['NBR'] # number of cells nga = cell_dataset.attrs['NGA'] # number of Gauss points # Only 1 Gauss/elemental nodal point per cell if nga == 1: values = cell_dataset['CO'][()].reshape(-1, nbr).T if values.shape[1] == 1: # cut off for scalars values = values[:, 0] # Multiple Gauss/elemental nodal points per cell # In general at each cell the value shape will be (nco, nga) else: values = cell_dataset['CO'][()].reshape(-1, nbr, nga) values = numpy.swapaxes(values, 0, 1) try: # cell type already exists key = med_to_meshio_type[med_type] cell_data[key][name] = values except KeyError: cell_data[key] = {name: values} return cell_data def write( filename, points, cells, point_data=None, cell_data=None, field_data=None, add_global_ids=True ): import h5py point_data = {} if point_data is None else point_data cell_data = {} if cell_data is None else cell_data field_data = {} if field_data is None else field_data f = h5py.File(filename, 'w') # Strangely the version must be 3.0.x # Any version >= 3.1.0 will NOT work with SALOME 8.3 info = f.create_group('INFOS_GENERALES') info.attrs.create('MAJ', 3) info.attrs.create('MIN', 0) info.attrs.create('REL', 0) # Meshes ens_maa = f.create_group('ENS_MAA') mesh_name = 'mesh' mesh = ens_maa.create_group(mesh_name) mesh.attrs.create('DIM', points.shape[1]) # mesh dimension mesh.attrs.create('ESP', points.shape[1]) # spatial dimension mesh.attrs.create('REP', 0) # cartesian coordinate system (repère in french) mesh.attrs.create('UNT', b'') # time unit mesh.attrs.create('UNI', b'') # spatial unit mesh.attrs.create('SRT', 1) # sorting type MED_SORT_ITDT mesh.attrs.create('NOM', _comp_nom(points.shape[1]).encode('ascii')) # component names mesh.attrs.create('DES', b'Mesh created with meshio') mesh.attrs.create('TYP', 0) # mesh type (MED_NON_STRUCTURE) # Time-step step = '-0000000000000000001-0000000000000000001' # NDT NOR ts = mesh.create_group(step) 
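# The sentinel group name above packs NDT and NOR as two fixed-width signed
# integers (-1 and -1); together with the NDT/NOR attributes set below this
# marks a mesh without actual time-stepping (an assumption based on MED
# files produced by Salome).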
ts.attrs.create('CGT', 1) ts.attrs.create('NDT', -1) # no time step (-1) ts.attrs.create('NOR', -1) # no iteration step (-1) ts.attrs.create('PDT', -1.0) # current time # Points noe_group = ts.create_group('NOE') noe_group.attrs.create('CGT', 1) noe_group.attrs.create('CGS', 1) pfl = 'MED_NO_PROFILE_INTERNAL' noe_group.attrs.create('PFL', pfl.encode('ascii')) coo = noe_group.create_dataset('COO', data=points.T.flatten()) coo.attrs.create('CGT', 1) coo.attrs.create('NBR', len(points)) # Cells (mailles in french) mai_group = ts.create_group('MAI') mai_group.attrs.create('CGT', 1) for key, med_type in meshio_to_med_type.items(): if key in cells: mai = mai_group.create_group(med_type) mai.attrs.create('CGT', 1) mai.attrs.create('CGS', 1) mai.attrs.create('PFL', pfl.encode('ascii')) nod = mai.create_dataset('NOD', data=cells[key].T.flatten() + 1) nod.attrs.create('CGT', 1) nod.attrs.create('NBR', len(cells[key])) # Subgroups (familles in french) fas = f.create_group('FAS') fm = fas.create_group(mesh_name) fz = fm.create_group('FAMILLE_ZERO') # must be defined in any case fz.attrs.create('NUM', 0) # Write nodal/cell data if point_data or cell_data: cha = f.create_group('CHA') # Nodal data for name, data in point_data.items(): supp = 'NOEU' # nodal data _write_data(cha, mesh_name, pfl, name, supp, data) # Cell data # Only support writing ELEM fields with only 1 Gauss point per cell # Or ELNO (DG) fields defined at every node per cell for cell_type, d in cell_data.items(): for name, data in d.items(): # Determine the nature of the cell data # Either data.shape = (nbr, ) or (nbr, nco) -> ELEM # or data.shape = (nbr, nco, nga) -> ELNO or ELGA med_type = meshio_to_med_type[cell_type] nn = int(med_type[-1]) # number of nodes per cell if data.ndim <= 2: supp = 'ELEM' elif data.shape[2] == nn: supp = 'ELNO' else: # general ELGA data defined at unknown Gauss points supp = 'ELGA' _write_data(cha, mesh_name, pfl, name, supp, data, med_type) return def _write_data(cha, mesh_name, pfl, name, supp, data, med_type=None): # Skip for general ELGA fields defined at unknown Gauss points if supp == 'ELGA': return # Field try: # a same MED field may contain fields of different natures field = cha.create_group(name) field.attrs.create('MAI', mesh_name.encode('ascii')) field.attrs.create('TYP', 6) # MED_FLOAT64 field.attrs.create('UNI', b'') # physical unit field.attrs.create('UNT', b'') # time unit nco = 1 if data.ndim == 1 else data.shape[1] field.attrs.create('NCO', nco) # number of components field.attrs.create('NOM', _comp_nom(nco).encode('ascii')) # Time-step step = '0000000000000000000100000000000000000001' ts = field.create_group(step) ts.attrs.create('NDT', 1) # time step 1 ts.attrs.create('NOR', 1) # iteration step 1 ts.attrs.create('PDT', 0.0) # current time ts.attrs.create('RDT', -1) # NDT of the mesh ts.attrs.create('ROR', -1) # NOR of the mesh except ValueError: # name already exists field = cha[name] ts_name = list(field.keys())[-1] ts = field[ts_name] # Field information if supp == 'NOEU': typ = ts.create_group('NOE') elif supp == 'ELNO': typ = ts.create_group('NOE.' + med_type) else: # 'ELEM' with only 1 Gauss points! typ = ts.create_group('MAI.' 
+ med_type) typ.attrs.create('GAU', b'') # no associated Gauss points typ.attrs.create('PFL', pfl.encode('ascii')) pfl = typ.create_group(pfl) pfl.attrs.create('NBR', len(data)) # number of points if supp == 'ELNO': pfl.attrs.create('NGA', data.shape[2]) else: pfl.attrs.create('NGA', 1) pfl.attrs.create('GAU', b'') # Data if supp == 'NOEU' or supp == 'ELEM': pfl.create_dataset('CO', data=data.T.flatten()) else: # ELNO fields data = numpy.swapaxes(data, 0, 1) pfl.create_dataset('CO', data=data.flatten()) def _comp_nom(nco): ''' To be correctly read in a MED viewer, each component must be a string of width 16. Since we do not know the physical nature of the data, we just use V1, V2, ... ''' return ''.join(['V%-15d' % (i+1) for i in range(nco)]) meshio-1.11.7/meshio/medit_io.py000066400000000000000000000103341323505575200165240ustar00rootroot00000000000000# -*- coding: utf-8 -*- # ''' I/O for Medit's format, cf. . Check out for something like a specification. .. moduleauthor:: Nico Schlömer ''' from itertools import islice import logging import numpy def read(filename): with open(filename) as f: points, cells = read_buffer(f) return points, cells, {}, {}, {} def read_buffer(f): dim = 0 cells = {} while True: try: line = next(islice(f, 1)) except StopIteration: break stripped = line.strip() # skip comments and empty lines if not stripped or stripped[0] == '#': continue assert stripped[0].isalpha() keyword = stripped.split(' ')[0] meshio_from_medit = { 'Edges': ('line', 2), 'Triangles': ('triangle', 3), 'Quadrilaterals': ('quad', 4), 'Tetrahedra': ('tetra', 4), 'Hexahedra': ('hexahedron', 8) } if keyword == 'MeshVersionFormatted': assert stripped[-1] == '1' elif keyword == 'Dimension': dim = int(stripped[-1]) elif keyword == 'Vertices': assert dim > 0 # The first line is the number of nodes line = next(islice(f, 1)) num_verts = int(line) points = numpy.empty((num_verts, dim), dtype=float) for k, line in enumerate(islice(f, num_verts)): # Throw away the label immediately points[k] = numpy.array(line.split(), dtype=float)[:-1] elif keyword in meshio_from_medit: meshio_name, num = meshio_from_medit[keyword] # The first line is the number of elements line = next(islice(f, 1)) num_cells = int(line) cell_data = numpy.empty((num_cells, num), dtype=int) for k, line in enumerate(islice(f, num_cells)): data = numpy.array(line.split(), dtype=int) # Throw away the label cell_data[k] = data[:-1] # adapt 0-base cells[meshio_name] = cell_data - 1 else: assert keyword == 'End', 'Unknown keyword \'{}\'.'.format(keyword) return points, cells def write( filename, points, cells, point_data=None, cell_data=None, field_data=None ): point_data = {} if point_data is None else point_data cell_data = {} if cell_data is None else cell_data field_data = {} if field_data is None else field_data with open(filename, 'wb') as fh: fh.write(b'MeshVersionFormatted 1\n') fh.write(b'# Created by meshio\n') # Dimension info d = '\nDimension {}\n'.format(points.shape[1]) fh.write(d.encode('utf-8')) # vertices fh.write(b'\nVertices\n') fh.write('{}\n'.format(len(points)).encode('utf-8')) labels = numpy.ones(len(points), dtype=int) data = numpy.c_[points, labels] fmt = ' '.join(['%r'] * points.shape[1]) + ' %d' numpy.savetxt(fh, data, fmt) medit_from_meshio = { 'line': ('Edges', 2), 'triangle': ('Triangles', 3), 'quad': ('Quadrilaterals', 4), 'tetra': ('Tetrahedra', 4), 'hexahedron': ('Hexahedra', 8) } for key, data in cells.items(): try: medit_name, num = medit_from_meshio[key] except KeyError: msg = ( 'MEDIT\'s mesh format doesn\'t
know {} cells. Skipping.' ).format(key) logging.warning(msg) continue fh.write(b'\n') fh.write('{}\n'.format(medit_name).encode('utf-8')) fh.write('{}\n'.format(len(data)).encode('utf-8')) labels = numpy.ones(len(data), dtype=int) # adapt 1-base data_with_label = numpy.c_[data + 1, labels] fmt = ' '.join(['%d'] * (num + 1)) numpy.savetxt(fh, data_with_label, fmt) fh.write(b'\nEnd\n') return meshio-1.11.7/meshio/off_io.py000066400000000000000000000060401323505575200161730ustar00rootroot00000000000000# -*- coding: utf-8 -*- # ''' I/O for the OFF surface format, cf. . .. moduleauthor:: Nico Schlömer ''' from itertools import islice import numpy def read(filename): with open(filename) as f: points, cells = read_buffer(f) return points, cells, {}, {}, {} def read_buffer(f): # assert that the first line reads `OFF` line = next(islice(f, 1)) assert line.strip() == 'OFF' # fast forward to the next significant line while True: line = next(islice(f, 1)) stripped = line.strip() if stripped and stripped[0] != '#': break # This next line contains: # # 2775 5558 0 num_verts, num_faces, num_edges = stripped.split(' ') num_verts = int(num_verts) num_faces = int(num_faces) num_edges = int(num_edges) verts = numpy.empty((num_verts, 3), dtype=float) # read vertices k = 0 while True: if k >= num_verts: break try: line = next(islice(f, 1)) except StopIteration: break stripped = line.strip() # skip comments and empty lines if not stripped or stripped[0] == '#': continue x, y, z = stripped.split() verts[k] = [float(x), float(y), float(z)] k += 1 # read cells triangles = [] k = 0 while True: if k >= num_faces: break try: line = next(islice(f, 1)) except StopIteration: break stripped = line.strip() # skip comments and empty lines if not stripped or stripped[0] == '#': continue data = stripped.split() num_points = int(data[0]) assert num_points == len(data) - 1 assert num_points == 3, 'Can only handle triangular faces' data = [int(data[1]), int(data[2]), int(data[3])] triangles.append(data) cells = {} if triangles: cells['triangle'] = numpy.array(triangles) return verts, cells def write( filename, points, cells, point_data=None, cell_data=None, field_data=None ): point_data = {} if point_data is None else point_data cell_data = {} if cell_data is None else cell_data field_data = {} if field_data is None else field_data for key in cells: assert key in ['triangle'], 'Can only deal with triangular faces' tri = cells['triangle'] with open(filename, 'wb') as fh: fh.write(b'OFF\n') fh.write(b'# Created by meshio\n\n') # counts c = '{} {} {}\n\n'.format(len(points), len(tri), 0) fh.write(c.encode('utf-8')) # vertices numpy.savetxt(fh, points, '%r') # triangles data_with_label = numpy.c_[ tri.shape[1] * numpy.ones(tri.shape[0]), tri ] numpy.savetxt(fh, data_with_label, '%d %d %d %d') return meshio-1.11.7/meshio/permas_io.py000066400000000000000000000146361323505575200167220ustar00rootroot00000000000000# -*- coding: utf-8 -*- # ''' I/O for PERMAS dat format, cf. .. moduleauthor:: Nils Wagner ''' import gzip import re import numpy from .__about__ import __version__, __website__ def read(filename): '''Reads a (compressed) PERMAS dato or post file. ''' # The format is specified at # . 
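# Illustrative input fragment (hypothetical, but matching what write()
# below emits); '!' lines terminate a block for this reader:
#
#   $COOR NSET = ALL_NODES
#          1 +0.0 +0.0 +0.0
#          2 +1.0 +0.0 +0.0
#          3 +0.0 +1.0 +0.0
#   !
#   $ELEMENT TYPE = TRIA3 ESET = TRIA3
#          1 1 2 3
#   !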
cells = {} meshio_to_permas_type = { 'vertex': (1, 'PLOT1'), 'line': (2, 'PLOTL2'), 'triangle': (3, 'TRIA3'), 'quad': (4, 'QUAD4'), 'tetra': (4, 'TET4'), 'hexahedron': (8, 'HEXE8'), 'wedge': (6, 'PENTA6'), 'pyramid': (5, 'PYRA5') } if filename.endswith('.gz'): opener = gzip.open else: # assert filename.endswith('dato') or filename.endswith('post') opener = open with opener(filename, 'r') as f: while True: line = f.readline() if not line or re.search('\\$END STRUCTURE', line): break for meshio_type, permas_ele in meshio_to_permas_type.items(): num_nodes = permas_ele[0] permas_type = permas_ele[1] if re.search('\\$ELEMENT TYPE = {}'.format(permas_type), line): while True: line = f.readline() if not line or line.startswith('!'): break data = numpy.array(line.split(), dtype=int) if meshio_type in cells: cells[meshio_type].append(data[-num_nodes:]) else: cells[meshio_type] = [data[-num_nodes:]] if re.search('\\$COOR', line): points = [] while True: line = f.readline() if not line or line.startswith('!'): break for r in numpy.array(line.split(), dtype=float)[1:]: points.append(r) points = numpy.array(points) points = numpy.reshape(points, newshape=(len(points)//3, 3)) for key in cells: # Subtract one to account for the fact that python indices # are 0-based. cells[key] = numpy.array(cells[key], dtype=int) - 1 return points, cells, {}, {}, {} def write( filename, points, cells, point_data=None, cell_data=None, field_data=None ): '''Writes PERMAS dat files, cf. http://www.intes.de (PERMAS ASCII file format). ''' point_data = {} if point_data is None else point_data cell_data = {} if cell_data is None else cell_data field_data = {} if field_data is None else field_data with open(filename, 'w') as fh: fh.write('!\n') fh.write('! File written by meshio version {}\n'.format(__version__)) fh.write('! Further information available at {}\n'.format(__website__)) fh.write('!\n') fh.write('$ENTER COMPONENT NAME = DFLT_COMP DOFTYPE = DISP MATH\n') fh.write('! \n') fh.write(' $SITUATION NAME = REAL_MODES\n') fh.write( ' DFLT_COMP SYSTEM =' ' NSV CONSTRAINTS = SPCVAR_1 ! LOADING = LOADVAR_1\n' ) fh.write(' $END SITUATION\n') fh.write('! \n') fh.write(' $STRUCTURE\n') fh.write('! \n') # Write nodes fh.write(' $COOR NSET = ALL_NODES\n') for k, x in enumerate(points): fh.write( ' {:8d} {:+.15f} {:+.15f} {:+.15f}\n'.format( k+1, x[0], x[1], x[2] )) meshio_to_permas_type = { 'vertex': (1, 'PLOT1'), 'line': (2, 'PLOTL2'), 'triangle': (3, 'TRIA3'), 'quad': (4, 'QUAD4'), 'tetra': (4, 'TET4'), 'hexahedron': (8, 'HEXE8'), 'wedge': (6, 'PENTA6'), 'pyramid': (5, 'PYRA5') } # # Avoid non-unique element numbers in case of multiple element types by # num_ele !!!
# num_ele = 0 for meshio_type, cell in cells.items(): numcells, num_local_nodes = cell.shape permas_type = meshio_to_permas_type[meshio_type] fh.write('!\n') fh.write( ' $ELEMENT TYPE = {} ESET = {}\n'.format( permas_type[1], permas_type[1] )) for k, c in enumerate(cell): form = ' %8d ' + \ ' '.join(num_local_nodes * ['%8d']) + \ '\n' fh.write(form % ((k+num_ele+1,) + tuple(c + 1))) num_ele += numcells fh.write('!\n') fh.write(' $END STRUCTURE\n') fh.write('!\n') elem_3D = ['HEXE8', 'TET4', 'PENTA6', 'PYRA5'] elem_2D = ['TRIA3', 'QUAD4'] elem_1D = ['PLOT1', 'PLOTL2'] fh.write(' $SYSTEM NAME = NSV\n') fh.write('!\n') fh.write(' $ELPROP\n') for meshio_type, cell in cells.items(): permas_type = meshio_to_permas_type[meshio_type] if permas_type[1] in elem_3D: fh.write( ' {} MATERIAL = DUMMY_MATERIAL\n'.format( permas_type[1] )) elif permas_type[1] in elem_2D: fh.write( 12 * ' ' + '{} GEODAT = GD_{} MATERIAL = DUMMY_MATERIAL\n'.format( permas_type[1], permas_type[1] )) else: assert permas_type[1] in elem_1D fh.write('!\n') fh.write(' $GEODAT SHELL CONT = THICK NODES = ALL\n') for meshio_type, cell in cells.items(): permas_type = meshio_to_permas_type[meshio_type] if permas_type[1] in elem_2D: fh.write(12 * ' ' + 'GD_{} 1.0\n'.format(permas_type[1])) fh.write('''! ! $END SYSTEM ! $CONSTRAINTS NAME = SPCVAR_1 $END CONSTRAINTS ! $LOADING NAME = LOADVAR_1 $END LOADING ! $EXIT COMPONENT ! $ENTER MATERIAL ! $MATERIAL NAME = DUMMY_MATERIAL TYPE = ISO ! $ELASTIC GENERAL INPUT = DATA 2.0E+05 0.3 ! $DENSITY GENERAL INPUT = DATA 7.8E-09 ! $THERMEXP GENERAL INPUT = DATA 1.2E-05 ! $END MATERIAL ! $EXIT MATERIAL ! $FIN ''') return meshio-1.11.7/meshio/stl_io.py000066400000000000000000000115361323505575200162310ustar00rootroot00000000000000# -*- coding: utf-8 -*- # ''' I/O for the STL format, cf. . .. moduleauthor:: Nico Schlömer ''' import numpy def read(filename): '''Reads a Gmsh msh file. ''' with open(filename, 'rb') as f: out = read_buffer(f) return out def read_buffer(f): line = f.readline().decode('utf-8') if line[:5] == 'solid': return _read_ascii(f) return _read_binary(f) def _read_ascii(f): facets = [] while True: line = f.readline().decode('utf-8') if line[:8] == 'endsolid': break line = line.strip() assert line[:5] == 'facet' facets.append(_read_facet(f)) line = f.readline().decode('utf-8') assert line.strip() == 'endfacet' # Now, all facets contain the point coordinate. Try to identify individual # points and build the data arrays. points, cells = data_from_facets(facets) return points, cells, {}, {}, {} def _read_facet(f): line = f.readline().decode('utf-8') assert line.strip() == 'outer loop' facet = numpy.empty((3, 3)) flt = numpy.vectorize(float) for k in range(3): parts = f.readline().decode('utf-8').split() assert len(parts) == 4 assert parts[0] == 'vertex' facet[k] = flt(parts[1:]) line = f.readline().decode('utf-8') assert line.strip() == 'endloop' return facet def data_from_facets(facets): # Now, all facets contain the point coordinate. Try to identify individual # points and build the data arrays. pts = numpy.concatenate(facets) # TODO equip `unique()` with a tolerance # Use return_index so we can use sort on `idx` such that the order is # preserved; see . 
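# Toy example of the deduplication below: for
#   pts = [[0, 0, 0], [1, 0, 0], [0, 0, 0]]
# numpy.unique(pts, axis=0, ...) gives idx = [0, 1], inv = [0, 1, 0];
# argsort(idx) recovers first-seen order, and composing it with inv maps
# every facet corner to its deduplicated point index.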
_, idx, inv = numpy.unique( pts, axis=0, return_index=True, return_inverse=True ) k = numpy.argsort(idx) points = pts[idx[k]] inv_k = numpy.argsort(k) cells = {'triangle': inv_k[inv].reshape(-1, 3)} return points, cells def _read_binary(f): # read the first uint32 byte to get the number of triangles data = numpy.fromstring(f.read(4), dtype=numpy.uint32) num_triangles = data[0] facets = [] for _ in range(num_triangles): # discard the normal f.read(12) facets.append( numpy.fromstring(f.read(36), dtype=numpy.float32).reshape(-1, 3) ) # discard the attribute byte count f.read(2) points, cells = data_from_facets(numpy.array(facets)) return points, cells, {}, {}, {} def write( filename, points, cells, point_data=None, cell_data=None, field_data=None, write_binary=False, ): assert not point_data, \ 'STL cannot write point data.' assert not field_data, \ 'STL cannot write field data.' assert len(cells.keys()) == 1 and list(cells.keys())[0] == 'triangle', \ 'STL can only write triangle cells.' if write_binary: _write_binary(filename, points, cells) else: _write_ascii(filename, points, cells) return def _compute_normals(pts): normals = numpy.cross(pts[:, 1] - pts[:, 0], pts[:, 2] - pts[:, 0]) nrm = numpy.sqrt(numpy.einsum('ij,ij->i', normals, normals)) normals = (normals.T / nrm).T return normals def _write_ascii(filename, points, cells): pts = points[cells['triangle']] normals = _compute_normals(pts) with open(filename, 'wb') as fh: fh.write('solid\n'.encode('utf-8')) for local_pts, normal in zip(pts, normals): # facet normal 0.455194 -0.187301 -0.870469 # outer loop # vertex 266.36 234.594 14.6145 # vertex 268.582 234.968 15.6956 # vertex 267.689 232.646 15.7283 # endloop # endfacet fh.write('facet normal {} {} {}\n'.format(*normal).encode('utf-8')) fh.write(' outer loop\n'.encode('utf-8')) for pt in local_pts: fh.write(' vertex {} {} {}\n'.format(*pt).encode('utf-8')) fh.write(' endloop\n'.encode('utf-8')) fh.write('endfacet\n'.encode('utf-8')) fh.write('endsolid\n'.encode('utf-8')) return def _write_binary(filename, points, cells): pts = points[cells['triangle']] normals = _compute_normals(pts) with open(filename, 'wb') as fh: # 80 character header data msg = 'This file was generated by meshio.' msg += (80 - len(msg)) * 'X' msg += '\n' fh.write(msg.encode('utf-8')) fh.write(numpy.uint32(len(cells['triangle']))) for pt, normal in zip(pts, normals): fh.write(normal.astype(numpy.float32)) fh.write(pt.astype(numpy.float32)) fh.write(numpy.uint16(0)) return meshio-1.11.7/meshio/vtk_io.py000066400000000000000000000361241323505575200162330ustar00rootroot00000000000000# -*- coding: utf-8 -*- # ''' I/O for VTK . .. 
moduleauthor:: Nico Schlömer ''' import logging import numpy from .__about__ import __version__ # https://www.vtk.org/doc/nightly/html/vtkCellType_8h_source.html vtk_to_meshio_type = { 0: 'empty', 1: 'vertex', # 2: 'poly_vertex', 3: 'line', # 4: 'poly_line', 5: 'triangle', # 6: 'triangle_strip', # 7: 'polygon', # 8: 'pixel', 9: 'quad', 10: 'tetra', # 11: 'voxel', 12: 'hexahedron', 13: 'wedge', 14: 'pyramid', 15: 'penta_prism', 16: 'hexa_prism', 21: 'line3', 22: 'triangle6', 23: 'quad8', 24: 'tetra10', 25: 'hexahedron20', 26: 'wedge15', 27: 'pyramid13', 28: 'quad9', 29: 'hexahedron27', 30: 'quad6', 31: 'wedge12', 32: 'wedge18', 33: 'hexahedron24', 34: 'triangle7', 35: 'line4', # # 60: VTK_HIGHER_ORDER_EDGE, # 61: VTK_HIGHER_ORDER_TRIANGLE, # 62: VTK_HIGHER_ORDER_QUAD, # 63: VTK_HIGHER_ORDER_POLYGON, # 64: VTK_HIGHER_ORDER_TETRAHEDRON, # 65: VTK_HIGHER_ORDER_WEDGE, # 66: VTK_HIGHER_ORDER_PYRAMID, # 67: VTK_HIGHER_ORDER_HEXAHEDRON, } meshio_to_vtk_type = {v: k for k, v in vtk_to_meshio_type.items()} # These are all VTK data types. One sometimes finds 'vtktypeint64', but # this is ill-formed. vtk_to_numpy_dtype = { 'bit': numpy.dtype('bool'), 'unsigned_char': numpy.dtype('uint8'), 'char': numpy.dtype('int8'), 'unsigned_short': numpy.dtype('uint16'), 'short': numpy.dtype('int16'), 'unsigned_int': numpy.dtype('uint32'), 'int': numpy.dtype('int32'), 'unsigned_long': numpy.dtype('int64'), 'long': numpy.dtype('int64'), 'float': numpy.dtype('float32'), 'double': numpy.dtype('float64'), } numpy_to_vtk_dtype = {v: k for k, v in vtk_to_numpy_dtype.items()} def read(filename): '''Reads a Gmsh msh file. ''' with open(filename, 'rb') as f: out = read_buffer(f) return out def read_buffer(f): # initialize output data points = None field_data = {} cell_data_raw = {} point_data = {} # skip header and title f.readline() f.readline() data_type = f.readline().decode('utf-8').strip() assert data_type in ['ASCII', 'BINARY'], \ 'Unknown VTK data type \'{}\'.'.format(data_type) is_ascii = data_type == 'ASCII' c = None offsets = None ct = None # One of the problem in reading VTK files are POINT_DATA and CELL_DATA # fields. They can contain a number of SCALARS+LOOKUP_TABLE tables, without # giving and indication of how many there are. Hence, SCALARS must be # treated like a first-class section. To associate it with POINT/CELL_DATA, # we store the `active` section in this variable. active = None while True: line = f.readline().decode('utf-8') if not line: # EOF break line = line.strip() # pylint: disable=len-as-condition if len(line) == 0: continue split = line.split() section = split[0] if section == 'DATASET': dataset_type = split[1] assert dataset_type == 'UNSTRUCTURED_GRID', \ 'Only VTK UNSTRUCTURED_GRID supported.' elif section == 'POINTS': active = 'POINTS' num_points = int(split[1]) data_type = split[2] dtype = vtk_to_numpy_dtype[data_type] if is_ascii: points = numpy.fromfile( f, count=num_points*3, sep=' ', dtype=dtype ) else: # binary num_bytes = numpy.dtype(dtype).itemsize total_num_bytes = num_points * (3 * num_bytes) # Binary data is big endian, see # . 
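# Legacy-VTK binary sections are big endian regardless of the host, so the
# byte order is forced onto the dtype and numpy does the swapping, e.g.
#   numpy.dtype(numpy.float64).newbyteorder('>') == numpy.dtype('>f8')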
dtype = dtype.newbyteorder('>') points = \ numpy.fromstring(f.read(total_num_bytes), dtype=dtype) line = f.readline().decode('utf-8') assert line == '\n' points = points.reshape((num_points, 3)) elif section == 'CELLS': active = 'CELLS' num_items = int(split[2]) if is_ascii: c = numpy.fromfile(f, count=num_items, sep=' ', dtype=int) else: # binary num_bytes = 4 total_num_bytes = num_items * num_bytes c = numpy.fromstring(f.read(total_num_bytes), dtype='>i4') line = f.readline().decode('utf-8') assert line == '\n' offsets = [] if len(c) > 0: offsets.append(0) while offsets[-1] + c[offsets[-1]] + 1 < len(c): offsets.append(offsets[-1] + c[offsets[-1]] + 1) offsets = numpy.array(offsets) elif section == 'CELL_TYPES': active = 'CELL_TYPES' num_items = int(split[1]) if is_ascii: ct = \ numpy.fromfile(f, count=int(num_items), sep=' ', dtype=int) else: # binary num_bytes = 4 total_num_bytes = num_items * num_bytes ct = numpy.fromstring(f.read(total_num_bytes), dtype='>i4') line = f.readline().decode('utf-8') assert line == '\n' elif section == 'POINT_DATA': active = 'POINT_DATA' num_items = int(split[1]) elif section == 'CELL_DATA': active = 'CELL_DATA' num_items = int(split[1]) elif section == 'SCALARS': if active == 'POINT_DATA': d = point_data else: assert active == 'CELL_DATA', \ 'Illegal SCALARS in section \'{}\'.'.format(active) d = cell_data_raw d.update(_read_scalar_field(f, num_items, split)) elif section == 'VECTORS': if active == 'POINT_DATA': d = point_data else: assert active == 'CELL_DATA', \ 'Illegal SCALARS in section \'{}\'.'.format(active) d = cell_data_raw d.update(_read_vector_field(f, num_items, split)) elif section == 'TENSORS': if active == 'POINT_DATA': d = point_data else: assert active == 'CELL_DATA', \ 'Illegal SCALARS in section \'{}\'.'.format(active) d = cell_data_raw d.update(_read_tensor_field(f, num_items, split)) else: assert section == 'FIELD', \ 'Unknown section \'{}\'.'.format(section) if active == 'POINT_DATA': d = point_data else: assert active == 'CELL_DATA', \ 'Illegal FIELD in section \'{}\'.'.format(active) d = cell_data_raw d.update(_read_fields(f, int(split[2]), is_ascii)) assert c is not None, \ 'Required section CELLS not found.' assert ct is not None, \ 'Required section CELL_TYPES not found.' cells, cell_data = translate_cells(c, offsets, ct, cell_data_raw) return points, cells, point_data, cell_data, field_data def _read_scalar_field(f, num_data, split): data_name = split[1] data_type = split[2] try: num_comp = int(split[3]) except IndexError: num_comp = 1 # The standard says: # > The parameter numComp must range between (1,4) inclusive; [...] 
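# An ASCII SCALARS section looks like
#   SCALARS pressure double 1
#   LOOKUP_TABLE default
#   0.0 1.5 2.5 ...
# `split` holds the header tokens; the LOOKUP_TABLE line is consumed below
# before the values are read.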
assert 0 < num_comp < 5 dtype = vtk_to_numpy_dtype[data_type] lt, _ = f.readline().decode('utf-8').split() assert lt == 'LOOKUP_TABLE' data = numpy.fromfile(f, count=num_data, sep=' ', dtype=dtype) return {data_name: data} def _read_vector_field(f, num_data, split): data_name = split[1] data_type = split[2] dtype = vtk_to_numpy_dtype[data_type] data = numpy.fromfile( f, count=3*num_data, sep=' ', dtype=dtype ).reshape(-1, 3) return {data_name: data} def _read_tensor_field(f, num_data, split): data_name = split[1] data_type = split[2] dtype = vtk_to_numpy_dtype[data_type] data = numpy.fromfile( f, count=9*num_data, sep=' ', dtype=dtype ).reshape(-1, 3, 3) return {data_name: data} def _read_fields(f, num_fields, is_ascii): data = {} for _ in range(num_fields): name, shape0, shape1, data_type = \ f.readline().decode('utf-8').split() shape0 = int(shape0) shape1 = int(shape1) dtype = vtk_to_numpy_dtype[data_type] if is_ascii: dat = numpy.fromfile( f, count=shape0 * shape1, sep=' ', dtype=dtype ) else: # binary num_bytes = numpy.dtype(dtype).itemsize total_num_bytes = shape0 * shape1 * num_bytes # Binary data is big endian, see # . dtype = dtype.newbyteorder('>') dat = numpy.fromstring(f.read(total_num_bytes), dtype=dtype) line = f.readline().decode('utf-8') assert line == '\n' if shape0 != 1: dat = dat.reshape((shape1, shape0)) data[name] = dat return data def raw_from_cell_data(cell_data): # merge cell data cell_data_raw = {} for d in cell_data.values(): for name, values in d.items(): if name in cell_data_raw: cell_data_raw[name].append(values) else: cell_data_raw[name] = [values] for name in cell_data_raw: cell_data_raw[name] = numpy.concatenate(cell_data_raw[name]) return cell_data_raw def translate_cells(data, offsets, types, cell_data_raw): # Translate it into the cells dictionary. # `data` is a one-dimensional vector with # (num_points0, p0, p1, ... ,pk, numpoints1, p10, p11, ..., p1k, ... # Collect types into bins. # See for better # alternatives. 
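# Binning sketch: for types = [5, 5, 10] (two triangles, one tet) this
# yields bins = {5: [0, 1], 10: [2]}, so each cell type can be gathered
# with one vectorized indexing operation instead of a Python loop.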
uniques = numpy.unique(types) bins = {u: numpy.where(types == u)[0] for u in uniques} cells = {} cell_data = {} for tpe, b in bins.items(): meshio_type = vtk_to_meshio_type[tpe] n = data[offsets[b[0]]] assert (data[offsets[b]] == n).all() indices = numpy.array([ numpy.arange(1, n+1) + o for o in offsets[b] ]) cells[meshio_type] = data[indices] cell_data[meshio_type] = \ {key: value[b] for key, value in cell_data_raw.items()} return cells, cell_data def write(filename, points, cells, point_data=None, cell_data=None, field_data=None, write_binary=True ): if not write_binary: logging.warning('VTK ASCII files are only meant for debugging.') point_data = {} if point_data is None else point_data cell_data = {} if cell_data is None else cell_data field_data = {} if field_data is None else field_data with open(filename, 'wb') as f: f.write('# vtk DataFile Version 4.2\n'.encode('utf-8')) f.write('written by meshio v{}\n'.format(__version__).encode('utf-8')) f.write(('BINARY\n' if write_binary else 'ASCII\n').encode('utf-8')) f.write('DATASET UNSTRUCTURED_GRID\n'.encode('utf-8')) # write points and cells _write_points(f, points, write_binary) _write_cells(f, cells, write_binary) # write point data if point_data: num_points = len(points) f.write('POINT_DATA {}\n'.format(num_points).encode('utf-8')) _write_field_data(f, point_data, write_binary) # write cell data if cell_data: total_num_cells = sum([len(c) for c in cells.values()]) cell_data_raw = raw_from_cell_data(cell_data) f.write('CELL_DATA {}\n'.format(total_num_cells).encode('utf-8')) _write_field_data(f, cell_data_raw, write_binary) return def _write_points(f, points, write_binary): f.write( 'POINTS {} {}\n'.format( len(points), numpy_to_vtk_dtype[points.dtype] ).encode('utf-8')) if write_binary: # Binary data must be big endian, see # . points.astype(points.dtype.newbyteorder('>')).tofile(f, sep='') else: # ascii points.tofile(f, sep=' ') f.write('\n'.encode('utf-8')) return def _write_cells(f, cells, write_binary): total_num_cells = sum([len(c) for c in cells.values()]) total_num_idx = sum([numpy.prod(c.shape) for c in cells.values()]) # For each cell, the number of nodes is stored total_num_idx += total_num_cells f.write( 'CELLS {} {}\n'.format(total_num_cells, total_num_idx) .encode('utf-8')) if write_binary: for key in cells: n = cells[key].shape[1] d = numpy.column_stack([ numpy.full(len(cells[key]), n), cells[key] ]).astype(numpy.dtype('>i4')) f.write(d.tostring()) if write_binary: f.write('\n'.encode('utf-8')) else: # ascii for key in cells: n = cells[key].shape[1] for cell in cells[key]: f.write((' '.join([ '{}'.format(idx) for idx in numpy.concatenate([[n], cell]) ]) + '\n').encode('utf-8')) # write cell types f.write('CELL_TYPES {}\n'.format(total_num_cells).encode('utf-8')) if write_binary: for key in cells: d = numpy.full( len(cells[key]), meshio_to_vtk_type[key] ).astype(numpy.dtype('>i4')) f.write(d.tostring()) f.write('\n'.encode('utf-8')) else: # ascii for key in cells: for _ in range(len(cells[key])): f.write( '{}\n'.format(meshio_to_vtk_type[key]).encode('utf-8') ) return def _write_field_data(f, data, write_binary): f.write(( 'FIELD FieldData {}\n'.format(len(data)) ).encode('utf-8')) for name, values in data.items(): if len(values.shape) == 1: num_tuples = values.shape[0] num_components = 1 else: assert len(values.shape) == 2, \ 'Only one and two-dimensional field data supported.' 
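# E.g., a (num_points, 3) float64 array named 'velocity' is announced as
# 'velocity 3 <num_points> double' below -- components first, then tuples.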
num_tuples = values.shape[0] num_components = values.shape[1] f.write(('{} {} {} {}\n'.format( name, num_components, num_tuples, numpy_to_vtk_dtype[values.dtype] )).encode('utf-8')) if write_binary: values.astype(values.dtype.newbyteorder('>')).tofile(f, sep='') else: # ascii values.tofile(f, sep=' ') # numpy.savetxt(f, points) f.write('\n'.encode('utf-8')) return meshio-1.11.7/meshio/vtu_io.py000066400000000000000000000347761323505575200162600ustar00rootroot00000000000000# -*- coding: utf-8 -*- # ''' I/O for VTU. .. moduleauthor:: Nico Schlömer ''' import base64 import logging try: from StringIO import cStringIO as BytesIO except ImportError: from io import BytesIO import sys # lxml cannot parse large files and instead throws the exception # # lxml.etree.XMLSyntaxError: xmlSAX2Characters: huge text node, [...] # # Use Python's native xml parser to avoid this error. import xml.etree.cElementTree as ET import zlib import numpy from .__about__ import __version__ from .vtk_io import vtk_to_meshio_type, meshio_to_vtk_type, raw_from_cell_data from .gmsh_io import num_nodes_per_cell def num_bytes_to_num_base64_chars(num_bytes): # Rounding up in integer division works by double negation since Python # always rounds down. return -(-num_bytes // 3) * 4 def _cells_from_data(connectivity, offsets, types, cell_data_raw): # Translate it into the cells dictionary. # `connectivity` is a one-dimensional vector with # (p0, p1, ... ,pk, p10, p11, ..., p1k, ... # Collect types into bins. # See for better # alternatives. uniques = numpy.unique(types) bins = {u: numpy.where(types == u)[0] for u in uniques} assert len(offsets) == len(types) cells = {} cell_data = {} for tpe, b in bins.items(): meshio_type = vtk_to_meshio_type[tpe] n = num_nodes_per_cell[meshio_type] indices = numpy.array([ # The offsets point to the _end_ of the indices numpy.arange(n) + o - n for o in offsets[b] ]) cells[meshio_type] = connectivity[indices] cell_data[meshio_type] = \ {key: value[b] for key, value in cell_data_raw.items()} return cells, cell_data vtu_to_numpy_type = { 'Float32': numpy.dtype(numpy.float32), 'Float64': numpy.dtype(numpy.float64), 'Int8': numpy.dtype(numpy.int8), 'Int16': numpy.dtype(numpy.int16), 'Int32': numpy.dtype(numpy.int32), 'Int64': numpy.dtype(numpy.int64), 'UInt8': numpy.dtype(numpy.uint8), 'UInt16': numpy.dtype(numpy.uint16), 'UInt32': numpy.dtype(numpy.uint32), 'UInt64': numpy.dtype(numpy.uint64), } numpy_to_vtu_type = {v: k for k, v in vtu_to_numpy_type.items()} # pylint: disable=too-many-instance-attributes class VtuReader(object): '''Helper class for reading VTU files. Some properties are global to the file (e.g., byte_order), and instead of passing around these parameters, make them properties of this class. 
''' def __init__(self, filename): points = None point_data = {} cell_data_raw = {} cells = {} field_data = {} tree = ET.parse(filename) root = tree.getroot() assert root.tag == 'VTKFile' assert root.attrib['type'] == 'UnstructuredGrid' assert root.attrib['version'] in ['0.1', '1.0'], \ 'Unknown VTU file version \'{}\'.'.format(root.attrib['version']) try: assert root.attrib['compressor'] == 'vtkZLibDataCompressor' except KeyError: pass self.header_type = ( root.attrib['header_type'] if 'header_type' in root.attrib else 'UInt32' ) try: self.byte_order = root.attrib['byte_order'] assert self.byte_order in ['LittleEndian', 'BigEndian'], \ 'Unknown byte order \'{}\'.'.format(self.byte_order) except KeyError: self.byte_order = None grid = None self.appended_data = None for c in root: if c.tag == 'UnstructuredGrid': assert grid is None, 'More than one UnstructuredGrid found.' grid = c else: assert c.tag == 'AppendedData', \ 'Unknown main tag \'{}\'.'.format(c.tag) assert self.appended_data is None, \ 'More than one AppendedData found.' assert c.attrib['encoding'] == 'base64' self.appended_data = c.text.strip() # The appended data always begins with a (meaningless) # underscore. assert self.appended_data[0] == '_' self.appended_data = self.appended_data[1:] assert grid is not None, 'No UnstructuredGrid found.' piece = None for c in grid: if c.tag == 'Piece': assert piece is None, 'More than one Piece found.' piece = c else: assert c.tag == 'FieldData', \ 'Unknown grid subtag \'{}\'.'.format(c.tag) # TODO test field data for data_array in c: field_data[data_array.attrib['Name']] = \ self.read_data(data_array) assert piece is not None, 'No Piece found.' num_points = int(piece.attrib['NumberOfPoints']) num_cells = int(piece.attrib['NumberOfCells']) for child in piece: if child.tag == 'Points': data_arrays = list(child) assert len(data_arrays) == 1 data_array = data_arrays[0] assert data_array.tag == 'DataArray' points = self.read_data(data_array) num_components = int(data_array.attrib['NumberOfComponents']) points = points.reshape(num_points, num_components) elif child.tag == 'Cells': for data_array in child: assert data_array.tag == 'DataArray' cells[data_array.attrib['Name']] = \ self.read_data(data_array) assert len(cells['offsets']) == num_cells assert len(cells['types']) == num_cells elif child.tag == 'PointData': for c in child: assert c.tag == 'DataArray' point_data[c.attrib['Name']] = self.read_data(c) else: assert child.tag == 'CellData', \ 'Unknown tag \'{}\'.'.format(child.tag) for c in child: assert c.tag == 'DataArray' cell_data_raw[c.attrib['Name']] = self.read_data(c) assert points is not None assert 'connectivity' in cells assert 'offsets' in cells assert 'types' in cells cells, cell_data = _cells_from_data( cells['connectivity'], cells['offsets'], cells['types'], cell_data_raw ) self.points = points self.cells = cells self.point_data = point_data self.cell_data = cell_data self.field_data = field_data return def read_binary(self, data, data_type): # first read the the block size; it determines the size of the header dtype = vtu_to_numpy_type[self.header_type] num_bytes_per_item = numpy.dtype(dtype).itemsize num_chars = num_bytes_to_num_base64_chars(num_bytes_per_item) byte_string = base64.b64decode(data[:num_chars])[:num_bytes_per_item] num_blocks = numpy.fromstring(byte_string, dtype)[0] # read the entire header num_header_items = 3 + num_blocks num_header_bytes = num_bytes_per_item * num_header_items num_header_chars = num_bytes_to_num_base64_chars(num_header_bytes) byte_string = 
base64.b64decode(data[:num_header_chars]) header = numpy.fromstring(byte_string, dtype) # num_blocks = header[0] # max_uncompressed_block_size = header[1] # last_compressed_block_size = header[2] block_sizes = header[3:] # Read the block data byte_array = base64.b64decode(data[num_header_chars:]) dtype = vtu_to_numpy_type[data_type] num_bytes_per_item = numpy.dtype(dtype).itemsize byte_offsets = numpy.concatenate( [[0], numpy.cumsum(block_sizes, dtype=block_sizes.dtype)] ) # https://github.com/numpy/numpy/issues/10135 byte_offsets = byte_offsets.astype(numpy.int64) # process the compressed data block_data = numpy.concatenate([ numpy.fromstring(zlib.decompress( byte_array[byte_offsets[k]:byte_offsets[k+1]] ), dtype=dtype) for k in range(num_blocks) ]) return block_data def read_data(self, c): if c.attrib['format'] == 'ascii': # ascii data = numpy.array( c.text.split(), dtype=vtu_to_numpy_type[c.attrib['type']] ) elif c.attrib['format'] == 'binary': data = self.read_binary(c.text.strip(), c.attrib['type']) else: # appended data assert c.attrib['format'] == 'appended', \ 'Unknown data format \'{}\'.'.format(c.attrib['format']) offset = int(c.attrib['offset']) data = self.read_binary( self.appended_data[offset:], c.attrib['type'] ) if 'NumberOfComponents' in c.attrib: data = data.reshape(-1, int(c.attrib['NumberOfComponents'])) return data def read(filename): reader = VtuReader(filename) return ( reader.points, reader.cells, reader.point_data, reader.cell_data, reader.field_data ) def write(filename, points, cells, point_data=None, cell_data=None, field_data=None, write_binary=True, pretty_xml=True ): if not write_binary: logging.warning('VTU ASCII files are only meant for debugging.') point_data = {} if point_data is None else point_data cell_data = {} if cell_data is None else cell_data field_data = {} if field_data is None else field_data header_type = 'UInt32' vtk_file = ET.Element( 'VTKFile', type='UnstructuredGrid', version='0.1', # Use the native endianness. Not strictly necessary, but this # simplifies things a bit. byte_order=( 'LittleEndian' if sys.byteorder == 'little' else 'BigEndian' ), header_type=header_type, compressor='vtkZLibDataCompressor' ) # swap the data to match the system byteorder # Don't use byteswap to make sure that the dtype is changed; see # . 
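# newbyteorder('=') only normalizes the dtype to native order; the astype
# call performs the actual swap. Note that rebinding the loop variables in
# the dict loops below does not write back into the dicts; an in-place
# sketch (not the original code) would be
#   for name in point_data:
#       point_data[name] = \
#           point_data[name].astype(point_data[name].dtype.newbyteorder('='))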
points = points.astype(points.dtype.newbyteorder('=')) for data in point_data.values(): data = data.astype(data.dtype.newbyteorder('=')) for data in cell_data.values(): for dat in data.values(): dat = dat.astype(dat.dtype.newbyteorder('=')) for data in field_data.values(): data = data.astype(data.dtype.newbyteorder('=')) def chunk_it(array, n): out = [] k = 0 while k*n < len(array): out.append(array[k*n:(k+1)*n]) k += 1 return out def numpy_to_xml_array(parent, name, fmt, data): da = ET.SubElement( parent, 'DataArray', type=numpy_to_vtu_type[data.dtype], Name=name, ) if len(data.shape) == 2: da.set('NumberOfComponents', '{}'.format(data.shape[1])) if write_binary: da.set('format', 'binary') max_block_size = 32768 data_bytes = data.tostring() blocks = chunk_it(data_bytes, max_block_size) num_blocks = len(blocks) last_block_size = len(blocks[-1]) compressed_blocks = [zlib.compress(block) for block in blocks] # collect header header = numpy.array( [num_blocks, max_block_size, last_block_size] + [len(b) for b in compressed_blocks], dtype=vtu_to_numpy_type[header_type] ) da.text = ( base64.b64encode(header.tostring()) + base64.b64encode(b''.join(compressed_blocks)) ).decode() else: da.set('format', 'ascii') s = BytesIO() numpy.savetxt(s, data.flatten(), fmt) da.text = s.getvalue().decode() return comment = \ ET.Comment('This file was created by meshio v{}'.format(__version__)) vtk_file.insert(1, comment) grid = ET.SubElement(vtk_file, 'UnstructuredGrid') total_num_cells = sum([len(c) for c in cells.values()]) piece = ET.SubElement( grid, 'Piece', NumberOfPoints='{}'.format(len(points)), NumberOfCells='{}'.format(total_num_cells) ) # points if points is not None: pts = ET.SubElement(piece, 'Points') numpy_to_xml_array(pts, 'Points', '%.11e', points) if cells is not None: cls = ET.SubElement(piece, 'Cells') # create connectivity, offset, type arrays connectivity = numpy.concatenate([ numpy.concatenate(v) for v in cells.values() ]) # offset (points to the first element of the next cell) offsets = [ v.shape[1] * numpy.arange(1, v.shape[0]+1) for v in cells.values() ] for k in range(1, len(offsets)): offsets[k] += offsets[k-1][-1] offsets = numpy.concatenate(offsets) # types types = numpy.concatenate([ numpy.full(len(v), meshio_to_vtk_type[k]) for k, v in cells.items() ]) numpy_to_xml_array(cls, 'connectivity', '%d', connectivity) numpy_to_xml_array(cls, 'offsets', '%d', offsets) numpy_to_xml_array(cls, 'types', '%d', types) if point_data: pd = ET.SubElement(piece, 'PointData') for name, data in point_data.items(): numpy_to_xml_array(pd, name, '%.11e', data) if cell_data: cd = ET.SubElement(piece, 'CellData') for name, data in raw_from_cell_data(cell_data).items(): numpy_to_xml_array(cd, name, '%.11e', data) write_xml(filename, vtk_file, pretty_xml) return def write_xml(filename, root, pretty_print=False, indent=4): if pretty_print: # https://stackoverflow.com/a/17402424/353337 def prettify(elem): import xml.dom.minidom rough_string = ET.tostring(elem, 'utf-8') reparsed = xml.dom.minidom.parseString(rough_string) return reparsed.toprettyxml(indent=indent*' ') with open(filename, 'w') as f: f.write(prettify(root)) else: tree = ET.ElementTree(root) tree.write(filename) return meshio-1.11.7/meshio/xdmf_io.py000066400000000000000000000425331323505575200163660ustar00rootroot00000000000000# -*- coding: utf-8 -*- # ''' I/O for XDMF. http://www.xdmf.org/index.php/XDMF_Model_and_Format .. 
moduleauthor:: Nico Schlömer ''' import os try: from StringIO import cStringIO as BytesIO except ImportError: from io import BytesIO import xml.etree.cElementTree as ET import numpy from .gmsh_io import cell_data_from_raw from .vtk_io import raw_from_cell_data from .vtu_io import write_xml def read(filename): return XdmfReader(filename).read() numpy_to_xdmf_dtype = { numpy.dtype(numpy.int32): ('Int', '4'), numpy.dtype(numpy.int64): ('Int', '8'), numpy.dtype(numpy.uint32): ('UInt', '4'), numpy.dtype(numpy.uint64): ('UInt', '8'), numpy.dtype(numpy.float32): ('Float', '4'), numpy.dtype(numpy.float64): ('Float', '8'), } xdmf_to_numpy_type = {v: k for k, v in numpy_to_xdmf_dtype.items()} dtype_to_format_string = { numpy.dtype(numpy.int32): '%d', numpy.dtype(numpy.int64): '%d', numpy.dtype(numpy.uint32): '%d', numpy.dtype(numpy.uint64): '%d', numpy.dtype(numpy.float32): '%.7e', numpy.dtype(numpy.float64): '%.15e', } # Check out # # for the list of indices. xdmf_idx_to_meshio_type = { 0x1: 'vertex', 0x2: 'line', 0x4: 'triangle', 0x5: 'quad', 0x6: 'tetra', 0x7: 'pyramid', 0x8: 'wedge', 0x9: 'hexahedron', 0x22: 'line3', 0x23: 'quad9', 0x24: 'triangle6', 0x25: 'quad8', 0x26: 'tetra10', 0x27: 'pyramid13', 0x28: 'wedge15', 0x29: 'wedge18', 0x30: 'hexahedron20', 0x31: 'hexahedron24', 0x32: 'hexahedron27', 0x33: 'hexahedron64', 0x34: 'hexahedron125', 0x35: 'hexahedron216', 0x36: 'hexahedron343', 0x37: 'hexahedron512', 0x38: 'hexahedron729', 0x39: 'hexahedron1000', 0x40: 'hexahedron1331', # 0x41: 'hexahedron_spectral_64', # 0x42: 'hexahedron_spectral_125', # 0x43: 'hexahedron_spectral_216', # 0x44: 'hexahedron_spectral_343', # 0x45: 'hexahedron_spectral_512', # 0x46: 'hexahedron_spectral_729', # 0x47: 'hexahedron_spectral_1000', # 0x48: 'hexahedron_spectral_1331', } meshio_type_to_xdmf_index = {v: k for k, v in xdmf_idx_to_meshio_type.items()} # See # # for XDMF types. # There appears to be no particular consistency, so allow for different # alternatives as well. meshio_to_xdmf_type = { 'vertex': ['Polyvertex'], 'line': ['Polyline'], 'triangle': ['Triangle'], 'quad': ['Quadrilateral'], 'tetra': ['Tetrahedron'], 'pyramid': ['Pyramid'], 'wedge': ['Wedge'], 'hexahedron': ['Hexahedron'], 'line3': ['Edge_3'], 'triangle6': ['Triangle_6', 'Tri_6'], 'quad8': ['Quadrilateral_8', 'Quad_8'], 'tetra10': ['Tetrahedron_10', 'Tet_10'], 'pyramid13': ['Pyramid_13'], 'wedge15': ['Wedge_15'], 'hexahedron20': ['Hexahedron_20', 'Hex_20'], } xdmf_to_meshio_type = { v: k for k, vals in meshio_to_xdmf_type.items() for v in vals } def _translate_mixed_cells(data): # Translate it into the cells dictionary. # `data` is a one-dimensional vector with # (cell_type1, p0, p1, ... ,pk, cell_type2, p10, p11, ..., p1k, ... # http://www.xdmf.org/index.php/XDMF_Model_and_Format#Topology # https://gitlab.kitware.com/xdmf/xdmf/blob/master/XdmfTopologyType.hpp#L394 xdmf_idx_to_num_nodes = { 1: 1, # vertex 4: 3, # triangle 5: 4, # quad 6: 4, # tet 7: 5, # pyramid 8: 6, # wedge 9: 8, # hex 11: 6, # triangle6 } # collect types and offsets types = [] offsets = [] r = 0 while r < len(data): types.append(data[r]) offsets.append(r) r += xdmf_idx_to_num_nodes[data[r]] + 1 offsets = numpy.array(offsets) # Collect types into bins. # See for better # alternatives. 
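# Mixed-topology layout example: data = [4, 0, 1, 2, 5, 0, 1, 3, 2] encodes
# one triangle (type 0x4, 3 nodes) followed by one quad (type 0x5, 4 nodes);
# the loop above then leaves offsets = [0, 4], and the binning below groups
# equal type codes for vectorized extraction.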
uniques = numpy.unique(types) bins = {u: numpy.where(types == u)[0] for u in uniques} cells = {} for tpe, b in bins.items(): meshio_type = xdmf_idx_to_meshio_type[tpe] assert (data[offsets[b]] == tpe).all() n = xdmf_idx_to_num_nodes[tpe] indices = numpy.array([ numpy.arange(1, n+1) + o for o in offsets[b] ]) cells[meshio_type] = data[indices] return cells class XdmfReader(object): def __init__(self, filename): self.filename = filename return def read(self): tree = ET.parse(self.filename) root = tree.getroot() assert root.tag == 'Xdmf' version = root.attrib['Version'] if version.split('.')[0] == '2': return self.read_xdmf2(root) assert version.split('.')[0] == '3', \ 'Unknown XDMF version {}.'.format(version) return self.read_xdmf3(root) def read_data_item(self, data_item): import h5py dims = [int(d) for d in data_item.attrib['Dimensions'].split()] # Actually, `NumberType` is XDMF2 and `DataType` XDMF3, but many files # out there use both keys interchangeably. if 'DataType' in data_item.attrib: assert 'NumberType' not in data_item.attrib data_type = data_item.attrib['DataType'] elif 'NumberType' in data_item.attrib: assert 'DataType' not in data_item.attrib data_type = data_item.attrib['NumberType'] else: # Default, see # data_type = 'Float' try: precision = data_item.attrib['Precision'] except KeyError: precision = '4' if data_item.attrib['Format'] == 'XML': return numpy.array( data_item.text.split(), dtype=xdmf_to_numpy_type[(data_type, precision)] ).reshape(dims) elif data_item.attrib['Format'] == 'Binary': return numpy.fromfile( data_item.text.strip(), dtype=xdmf_to_numpy_type[(data_type, precision)] ).reshape(dims) assert data_item.attrib['Format'] == 'HDF', \ 'Unknown XDMF Format \'{}\'.'.format(data_item.attrib['Format']) info = data_item.text.strip() filename, h5path = info.split(':') # The HDF5 file path is given with respect to the XDMF (XML) file. full_hdf5_path = os.path.join( os.path.dirname(self.filename), filename ) f = h5py.File(full_hdf5_path, 'r') assert h5path[0] == '/' for key in h5path[1:].split('/'): f = f[key] # `[()]` gives a numpy.ndarray return f[()] def read_xdmf2(self, root): domains = list(root) assert len(domains) == 1 domain = domains[0] assert domain.tag == 'Domain' grids = list(domain) assert len(grids) == 1, \ 'XDMF reader: Only supports one grid right now.' grid = grids[0] assert grid.tag == 'Grid' try: assert grid.attrib['GridType'] == 'Uniform' except KeyError: # The default is 'Uniform' pass points = None cells = {} point_data = {} cell_data_raw = {} field_data = {} for c in grid: if c.tag == 'Topology': data_items = list(c) assert len(data_items) == 1 meshio_type = xdmf_to_meshio_type[c.attrib['TopologyType']] cells[meshio_type] = self.read_data_item(data_items[0]) elif c.tag == 'Geometry': try: assert c.attrib['GeometryType'] == 'XYZ' except KeyError: # The default is 'XYZ' pass data_items = list(c) assert len(data_items) == 1 points = self.read_data_item(data_items[0]) else: assert c.tag == 'Attribute', \ 'Unknown section \'{}\'.'.format(c.tag) # assert c.attrib['Active'] == '1' # assert c.attrib['AttributeType'] == 'None' data_items = list(c) assert len(data_items) == 1 data = self.read_data_item(data_items[0]) name = c.attrib['Name'] if c.attrib['Center'] == 'Node': point_data[name] = data elif c.attrib['Center'] == 'Cell': cell_data_raw[name] = data else: # TODO field data? 
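# 'Grid'-centered attributes would correspond to meshio's field_data; they
# are currently only validated here, not collected.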
assert c.attrib['Center'] == 'Grid' cell_data = cell_data_from_raw(cells, cell_data_raw) return points, cells, point_data, cell_data, field_data def read_xdmf3(self, root): domains = list(root) assert len(domains) == 1 domain = domains[0] assert domain.tag == 'Domain' grids = list(domain) assert len(grids) == 1, \ 'XDMF reader: Only supports one grid right now.' grid = grids[0] assert grid.tag == 'Grid' points = None cells = {} point_data = {} cell_data_raw = {} field_data = {} for c in grid: if c.tag == 'Topology': data_items = list(c) assert len(data_items) == 1 data_item = data_items[0] data = self.read_data_item(data_item) # The XDMF2 key is `TopologyType`, just `Type` for XDMF3. # Allow both. if 'Type' in c.attrib: assert 'TopologyType' not in c.attrib cell_type = c.attrib['Type'] else: cell_type = c.attrib['TopologyType'] if cell_type == 'Mixed': cells = _translate_mixed_cells(data) else: meshio_type = xdmf_to_meshio_type[cell_type] cells[meshio_type] = data elif c.tag == 'Geometry': try: geometry_type = c.attrib['GeometryType'] except KeyError: geometry_type = 'XYZ' data_items = list(c) assert len(data_items) == 1 data_item = data_items[0] points = self.read_data_item(data_item) if geometry_type == 'XY': points = numpy.column_stack([ points, numpy.zeros(len(points)) ]) else: assert c.tag == 'Attribute', \ 'Unknown section \'{}\'.'.format(c.tag) # Don't be too struct here: FEniCS, for example, calls this # 'AttributeType'. # assert c.attrib['Type'] == 'None' data_items = list(c) assert len(data_items) == 1 data_item = data_items[0] data = self.read_data_item(data_item) name = c.attrib['Name'] if c.attrib['Center'] == 'Node': point_data[name] = data else: assert c.attrib['Center'] == 'Cell' cell_data_raw[name] = data cell_data = cell_data_from_raw(cells, cell_data_raw) return points, cells, point_data, cell_data, field_data class XdmfWriter(object): def __init__( self, filename, points, cells, point_data=None, cell_data=None, field_data=None, pretty_xml=True, data_format='HDF', ): assert data_format in ['XML', 'Binary', 'HDF'], ( 'Unknown XDMF data format ' '\'{}\' (use \'XML\', \'Binary\', or \'HDF\'.)'.format(data_format) ) point_data = {} if point_data is None else point_data cell_data = {} if cell_data is None else cell_data field_data = {} if field_data is None else field_data self.filename = filename self.data_format = data_format self.data_counter = 0 if data_format == 'HDF': import h5py self.h5_filename = os.path.splitext(self.filename)[0] + '.h5' self.h5_file = h5py.File(self.h5_filename, 'w') xdmf_file = ET.Element('Xdmf', Version='3.0') domain = ET.SubElement(xdmf_file, 'Domain') grid = ET.SubElement(domain, 'Grid', Name='Grid') self.points(grid, points) self.cells(cells, grid) self.point_data(point_data, grid) self.cell_data(cell_data, grid) ET.register_namespace('xi', 'https://www.w3.org/2001/XInclude/') write_xml(filename, xdmf_file, pretty_xml, indent=2) return def numpy_to_xml_string(self, data): if self.data_format == 'XML': s = BytesIO() fmt = dtype_to_format_string[data.dtype] numpy.savetxt(s, data.flatten(), fmt) return s.getvalue().decode() elif self.data_format == 'Binary': bin_filename = '{}{}.bin'.format( os.path.splitext(self.filename)[0], self.data_counter, ) self.data_counter += 1 # write binary data to file with open(bin_filename, 'wb') as f: data.tofile(f) return bin_filename assert self.data_format == 'HDF' name = 'data{}'.format(self.data_counter) self.data_counter += 1 self.h5_file.create_dataset(name, data=data) return self.h5_filename + ':/' + name 
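# Usage sketch for this writer (illustrative; 'out.xdmf' is a made-up file
# name). With the default data_format='HDF', the heavy arrays land in a
# sibling 'out.h5' file:
#
#   import numpy
#   import meshio
#   points = numpy.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
#   cells = {'triangle': numpy.array([[0, 1, 2]])}
#   meshio.write('out.xdmf', points, cells)  # dispatches to XdmfWriter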
def points(self, grid, points): geo = ET.SubElement(grid, 'Geometry', Type='XYZ') dt, prec = numpy_to_xdmf_dtype[points.dtype] dim = '{} {}'.format(*points.shape) data_item = ET.SubElement( geo, 'DataItem', DataType=dt, Dimensions=dim, Format=self.data_format, Precision=prec ) data_item.text = self.numpy_to_xml_string(points) return def cells(self, cells, grid): if len(cells) == 1: meshio_type = list(cells.keys())[0] xdmf_type = meshio_to_xdmf_type[meshio_type][0] topo = ET.SubElement(grid, 'Topology', Type=xdmf_type) dt, prec = numpy_to_xdmf_dtype[cells[meshio_type].dtype] dim = '{} {}'.format(*cells[meshio_type].shape) data_item = ET.SubElement( topo, 'DataItem', DataType=dt, Dimensions=dim, Format=self.data_format, Precision=prec ) data_item.text = \ self.numpy_to_xml_string(cells[meshio_type]) elif len(cells) > 1: topo = ET.SubElement(grid, 'Topology', Type='Mixed') total_num_cells = sum(c.shape[0] for c in cells.values()) total_num_cell_items = \ sum(numpy.prod(c.shape) for c in cells.values()) dim = str(total_num_cell_items + total_num_cells) # Lines translate to Polylines, and one needs to specify the exact # number of nodes. Hence, prepend 2. if 'line' in cells: cells['line'] = numpy.insert(cells['line'], 0, 2, axis=1) cd = numpy.concatenate([ # prepend column with xdmf type index numpy.insert( value, 0, meshio_type_to_xdmf_index[key], axis=1 ).flatten() for key, value in cells.items() ]) dt, prec = numpy_to_xdmf_dtype[cd.dtype] data_item = ET.SubElement( topo, 'DataItem', DataType=dt, Dimensions=dim, Format=self.data_format, Precision=prec ) data_item.text = self.numpy_to_xml_string(cd) return def point_data(self, point_data, grid): for name, data in point_data.items(): att = ET.SubElement( grid, 'Attribute', Name=name, Type='None', Center='Node' ) dt, prec = numpy_to_xdmf_dtype[data.dtype] dim = ' '.join([str(s) for s in data.shape]) data_item = ET.SubElement( att, 'DataItem', DataType=dt, Dimensions=dim, Format=self.data_format, Precision=prec ) data_item.text = self.numpy_to_xml_string(data) return def cell_data(self, cell_data, grid): raw = raw_from_cell_data(cell_data) for name, data in raw.items(): att = ET.SubElement( grid, 'Attribute', Name=name, Type='None', Center='Cell' ) dt, prec = numpy_to_xdmf_dtype[data.dtype] dim = ' '.join([str(s) for s in data.shape]) data_item = ET.SubElement( att, 'DataItem', DataType=dt, Dimensions=dim, Format=self.data_format, Precision=prec ) data_item.text = self.numpy_to_xml_string(data) return def write(*args, **kwargs): XdmfWriter(*args, **kwargs) return meshio-1.11.7/setup.py000066400000000000000000000030311323505575200146030ustar00rootroot00000000000000# -*- coding: utf-8 -*- # import codecs import os from setuptools import setup, find_packages # https://packaging.python.org/single_source_version/ base_dir = os.path.abspath(os.path.dirname(__file__)) about = {} with open(os.path.join(base_dir, 'meshio', '__about__.py'), 'rb') as f: exec(f.read(), about) def read(fname): try: content = codecs.open( os.path.join(os.path.dirname(__file__), fname), encoding='utf-8' ).read() except Exception: content = '' return content setup( name='meshio', version=about['__version__'], author=about['__author__'], author_email=about['__author_email__'], packages=find_packages(), description='I/O for various mesh formats', long_description=read('README.rst'), url='https://github.com/nschloe/meshio', download_url='https://pypi.python.org/pypi/meshio', license=about['__license__'], platforms='any', install_requires=[ 'numpy', 'pipdate', ], extras_require={ 
'exodus': ['netCDF4'], 'hdf5': ['h5py'], # MED, MOAB, XDMF formats }, scripts=[ 'tools/meshio-convert', ], classifiers=[ about['__status__'], about['__license__'], 'Intended Audience :: Science/Research', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3', 'Topic :: Scientific/Engineering' ] ) meshio-1.11.7/test/000077500000000000000000000000001323505575200140535ustar00rootroot00000000000000meshio-1.11.7/test/helpers.py000066400000000000000000000175061323505575200161000ustar00rootroot00000000000000# -*- coding: utf-8 -*- # import os import string import numpy # In general: # Use values with an infinite decimal representation to test precision. tri_mesh = { 'points': numpy.array([ [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0] ]) / 3, 'cells': { 'triangle': numpy.array([ [0, 1, 2], [0, 2, 3] ]) }, } triangle6_mesh = { 'points': numpy.array([ [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.5, 0.25, 0.0], [1.25, 0.5, 0.0], [0.25, 0.75, 0.0], [2.0, 1.0, 0.0], [1.5, 1.25, 0.0], [1.75, 0.25, 0.0], ]) / 3.0, 'cells': { 'triangle6': numpy.array([ [0, 1, 2, 3, 4, 5], [1, 6, 2, 8, 7, 4] ]) }, } quad_mesh = { 'points': numpy.array([ [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [2.0, 0.0, 0.0], [2.0, 1.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0], ]) / 3.0, 'cells': { 'quad': numpy.array([ [0, 1, 4, 5], [1, 2, 3, 4], ]) }, } d = 0.1 quad8_mesh = { 'points': numpy.array([ [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.5, d, 0.0], [1-d, 0.5, 0.0], [0.5, 1-d, 0.0], [d, 0.5, 0.0], [2.0, 0.0, 0.0], [2.0, 1.0, 0.0], [1.5, -d, 0.0], [2+d, 0.5, 0.0], [1.5, 1+d, 0.0], ]) / 3.0, 'cells': { 'quad8': numpy.array([ [0, 1, 2, 3, 4, 5, 6, 7], [1, 8, 9, 2, 10, 11, 12, 5], ]) }, } tri_quad_mesh = { 'points': numpy.array([ [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [2.0, 0.0, 0.0], [2.0, 1.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0] ]) / 3.0, 'cells': { 'triangle': numpy.array([ [0, 1, 4], [0, 4, 5] ]), 'quad': numpy.array([ [1, 2, 3, 4] ]) } } tet_mesh = { 'points': numpy.array([ [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.5, 0.5, 0.5], ]) / 3.0, 'cells': { 'tetra': numpy.array([ [0, 1, 2, 4], [0, 2, 3, 4] ]) }, } tet10_mesh = { 'points': numpy.array([ [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.5, 0.5, 0.5], # [0.5, 0.0, 0.1], [1.0, 0.5, 0.1], [0.5, 0.5, 0.1], [0.25, 0.3, 0.25], [0.8, 0.25, 0.25], [0.7, 0.7, 0.3], ]) / 3.0, 'cells': { 'tetra10': numpy.array([ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], ]) }, } hex_mesh = { 'points': numpy.array([ [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 1.0], [1.0, 1.0, 1.0], [0.0, 1.0, 1.0], ]), 'cells': { 'hexahedron': numpy.array([ [0, 1, 2, 3, 4, 5, 6, 7], ]) }, } hex20_mesh = { 'points': numpy.array([ [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 1.0], [1.0, 1.0, 1.0], [0.0, 1.0, 1.0], # [0.5, 0.0, 0.0], [1.0, 0.5, 0.0], [0.5, 1.0, 0.0], [0.0, 0.5, 0.0], # [0.0, 0.0, 0.5], [1.0, 0.0, 0.5], [1.0, 1.0, 0.5], [0.0, 1.0, 0.5], # [0.5, 0.0, 1.0], [1.0, 0.5, 1.0], [0.5, 1.0, 1.0], [0.0, 0.5, 1.0], ]), 'cells': { 'hexahedron20': numpy.array([range(20)]) }, } def _clone(mesh): mesh2 = { 'points': numpy.copy(mesh['points']) } mesh2['cells'] = {} for key, data in mesh['cells'].items(): mesh2['cells'][key] = numpy.copy(data) return mesh2 def add_point_data(mesh, dim, num_tags=2): numpy.random.seed(0) mesh2 = _clone(mesh) if dim == 1: data = 
[
            numpy.random.rand(len(mesh['points']))
            for _ in range(num_tags)
            ]
    else:
        data = [
            numpy.random.rand(len(mesh['points']), dim)
            for _ in range(num_tags)
            ]

    mesh2['point_data'] = {
        string.ascii_lowercase[k]: d
        for k, d in enumerate(data)
        }
    return mesh2


def add_cell_data(mesh, dim, num_tags=2):
    mesh2 = _clone(mesh)
    numpy.random.seed(0)
    cell_data = {}
    for cell_type in mesh['cells']:
        num_cells = len(mesh['cells'][cell_type])
        if dim == 1:
            cell_data[cell_type] = {
                string.ascii_lowercase[k]: numpy.random.rand(num_cells)
                for k in range(num_tags)
                }
        else:
            cell_data[cell_type] = {
                string.ascii_lowercase[k]: numpy.random.rand(num_cells, dim)
                for k in range(num_tags)
                }
    mesh2['cell_data'] = cell_data
    return mesh2


def add_field_data(mesh, value, dtype):
    mesh2 = _clone(mesh)
    field_data = {
        'a': numpy.array(value, dtype=dtype),
        }
    mesh2['field_data'] = field_data
    return mesh2


def write_read(writer, reader, mesh, atol):
    '''Write and read a file, and make sure the data is the same as before.
    '''
    try:
        input_point_data = mesh['point_data']
    except KeyError:
        input_point_data = {}

    try:
        input_cell_data = mesh['cell_data']
    except KeyError:
        input_cell_data = {}

    try:
        input_field_data = mesh['field_data']
    except KeyError:
        input_field_data = {}

    filename = 'test.dat'
    writer(
        filename,
        mesh['points'], mesh['cells'],
        point_data=input_point_data,
        cell_data=input_cell_data,
        field_data=input_field_data,
        )

    points, cells, point_data, cell_data, field_data = reader(filename)

    # Numpy's array_equal is too strict here, cf.
    # .
    # Use allclose.
    # Note that this assumes the reader returns the points in the order in
    # which they were written.
    assert numpy.allclose(mesh['points'], points, atol=atol, rtol=0.0)

    for cell_type, data in mesh['cells'].items():
        assert numpy.allclose(data, cells[cell_type])

    for key in input_point_data.keys():
        assert numpy.allclose(
            input_point_data[key], point_data[key],
            atol=atol, rtol=0.0
            )

    for cell_type, cell_type_data in input_cell_data.items():
        for key, data in cell_type_data.items():
            assert numpy.allclose(
                data, cell_data[cell_type][key],
                atol=atol, rtol=0.0
                )

    for name, data in input_field_data.items():
        assert numpy.allclose(
            data, field_data[name],
            atol=atol, rtol=0.0
            )

    os.remove(filename)
    return
meshio-1.11.7/test/legacy_reader.py000066400000000000000000000144071323505575200172210ustar00rootroot00000000000000# -*- coding: utf-8 -*-
#
import numpy

from meshio.vtk_io import vtk_to_meshio_type


def read(filetype, filename):
    # pylint: disable=import-error
    import vtk
    from vtk.util import numpy_support

    def _read_data(data):
        '''Extract numpy arrays from a VTK data set.
        '''
        # Go through all arrays, fetch data.
        out = {}
        for k in range(data.GetNumberOfArrays()):
            array = data.GetArray(k)
            if array:
                array_name = array.GetName()
                out[array_name] = numpy.copy(
                    vtk.util.numpy_support.vtk_to_numpy(array)
                    )
        return out

    def _read_cells(vtk_mesh):
        data = numpy.copy(vtk.util.numpy_support.vtk_to_numpy(
            vtk_mesh.GetCells().GetData()
            ))
        offsets = numpy.copy(vtk.util.numpy_support.vtk_to_numpy(
            vtk_mesh.GetCellLocationsArray()
            ))
        types = numpy.copy(vtk.util.numpy_support.vtk_to_numpy(
            vtk_mesh.GetCellTypesArray()
            ))

        # `data` is a one-dimensional vector with
        # (num_points0, p0, p1, ... ,pk, numpoints1, p10, p11, ..., p1k, ...
        # Translate it into the cells dictionary.
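        # For example, two triangles (0, 1, 2) and (0, 2, 3) come out of VTK
        # as (cell type 5 being the triangle, cf. vtk_to_meshio_type)
        #
        #   data    = [3, 0, 1, 2, 3, 0, 2, 3]
        #   offsets = [0, 4]
        #   types   = [5, 5]
        #
        # i.e., data[offsets[i]] holds the number of points of cell i, and
        # the point indices follow right after it.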
        cells = {}
        for vtk_type, meshio_type in vtk_to_meshio_type.items():
            # Get all offsets for vtk_type
            os = offsets[numpy.argwhere(types == vtk_type).transpose()[0]]
            num_cells = len(os)
            if num_cells > 0:
                num_pts = data[os[0]]
                # instantiate the array
                arr = numpy.empty((num_cells, num_pts), dtype=int)
                # sort the num_pts entries after the offsets into the columns
                # of arr
                for k in range(num_pts):
                    arr[:, k] = data[os+k+1]
                cells[meshio_type] = arr
        return cells

    if filetype in ['vtk', 'vtk-ascii', 'vtk-binary']:
        reader = vtk.vtkUnstructuredGridReader()
        reader.SetFileName(filename)
        reader.SetReadAllNormals(1)
        reader.SetReadAllScalars(1)
        reader.SetReadAllTensors(1)
        reader.SetReadAllVectors(1)
        reader.Update()
        vtk_mesh = reader.GetOutput()
    elif filetype in ['vtu', 'vtu-ascii', 'vtu-binary']:
        reader = vtk.vtkXMLUnstructuredGridReader()
        reader.SetFileName(filename)
        reader.Update()
        vtk_mesh = reader.GetOutput()
    elif filetype in ['xdmf', 'xdmf2']:
        reader = vtk.vtkXdmfReader()
        reader.SetFileName(filename)
        reader.SetReadAllColorScalars(1)
        reader.SetReadAllFields(1)
        reader.SetReadAllNormals(1)
        reader.SetReadAllScalars(1)
        reader.SetReadAllTCoords(1)
        reader.SetReadAllTensors(1)
        reader.SetReadAllVectors(1)
        reader.Update()
        vtk_mesh = reader.GetOutputDataObject(0)
    elif filetype == 'xdmf3':
        reader = vtk.vtkXdmf3Reader()
        reader.SetFileName(filename)
        reader.SetReadAllColorScalars(1)
        reader.SetReadAllFields(1)
        reader.SetReadAllNormals(1)
        reader.SetReadAllScalars(1)
        reader.SetReadAllTCoords(1)
        reader.SetReadAllTensors(1)
        reader.SetReadAllVectors(1)
        reader.Update()
        vtk_mesh = reader.GetOutputDataObject(0)
    else:
        assert filetype == 'exodus', \
            'Unknown file type \'{}\'.'.format(filename)
        reader = vtk.vtkExodusIIReader()
        reader.SetFileName(filename)
        vtk_mesh = _read_exodusii_mesh(reader)

    # Explicitly extract points, cells, point data, field data
    points = numpy.copy(numpy_support.vtk_to_numpy(
        vtk_mesh.GetPoints().GetData()
        ))
    cells = _read_cells(vtk_mesh)

    point_data = _read_data(vtk_mesh.GetPointData())
    field_data = _read_data(vtk_mesh.GetFieldData())

    cell_data = _read_data(vtk_mesh.GetCellData())
    # split cell_data by the cell type
    cd = {}
    index = 0
    for cell_type in cells:
        num_cells = len(cells[cell_type])
        cd[cell_type] = {}
        for name, array in cell_data.items():
            cd[cell_type][name] = array[index:index+num_cells]
        index += num_cells
    cell_data = cd

    return points, cells, point_data, cell_data, field_data


def _read_exodusii_mesh(reader, timestep=None):
    '''Uses a vtkExodusIIReader to return a vtkUnstructuredGrid.
    '''
    # Fetch metadata.
    reader.UpdateInformation()

    # Set time step to read.
    if timestep:
        reader.SetTimeStep(timestep)

    # Make sure the point data are read during Update().
    for k in range(reader.GetNumberOfPointResultArrays()):
        arr_name = reader.GetPointResultArrayName(k)
        reader.SetPointResultArrayStatus(arr_name, 1)

    # Make sure the cell data are read during Update().
    for k in range(reader.GetNumberOfElementResultArrays()):
        arr_name = reader.GetElementResultArrayName(k)
        reader.SetElementResultArrayStatus(arr_name, 1)

    # Make sure all field data is read.
    for k in range(reader.GetNumberOfGlobalResultArrays()):
        arr_name = reader.GetGlobalResultArrayName(k)
        reader.SetGlobalResultArrayStatus(arr_name, 1)

    # Read the file.
    reader.Update()
    out = reader.GetOutput()

    # Loop through the blocks and search for a vtkUnstructuredGrid.
    # In Exodus, different element types are stored in different meshes, with
    # point information possibly duplicated.
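    # Schematically, the reader output looks like this (block names are
    # illustrative, not taken from an actual file):
    #
    #   vtkMultiBlockDataSet
    #   +-- block 0 (element blocks)
    #       +-- vtkUnstructuredGrid  (e.g., all tetra cells)
    #       +-- vtkUnstructuredGrid  (e.g., all hex cells)
    #
    # which is why the loop below descends two block levels.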
vtk_mesh = [] for i in range(out.GetNumberOfBlocks()): blk = out.GetBlock(i) for j in range(blk.GetNumberOfBlocks()): sub_block = blk.GetBlock(j) if sub_block is not None and sub_block.IsA('vtkUnstructuredGrid'): vtk_mesh.append(sub_block) assert vtk_mesh, 'No \'vtkUnstructuredGrid\' found!' assert len(vtk_mesh) == 1, 'More than one \'vtkUnstructuredGrid\' found!' # Cut off trailing '_' from array names. for k in range(vtk_mesh[0].GetPointData().GetNumberOfArrays()): array = vtk_mesh[0].GetPointData().GetArray(k) array_name = array.GetName() if array_name[-1] == '_': array.SetName(array_name[0:-1]) # time_values = reader.GetOutputInformation(0).Get( # vtkStreamingDemandDrivenPipeline.TIME_STEPS() # ) return vtk_mesh[0] # , time_values meshio-1.11.7/test/legacy_writer.py000066400000000000000000000161371323505575200172750ustar00rootroot00000000000000# -*- coding: utf-8 -*- # import logging import numpy # https://www.vtk.org/doc/nightly/html/vtkCellType_8h_source.html vtk_to_meshio_type = { 0: 'empty', 1: 'vertex', # 2: 'poly_vertex', 3: 'line', # 4: 'poly_line', 5: 'triangle', # 6: 'triangle_strip', # 7: 'polygon', # 8: 'pixel', 9: 'quad', 10: 'tetra', # 11: 'voxel', 12: 'hexahedron', 13: 'wedge', 14: 'pyramid', 15: 'penta_prism', 16: 'hexa_prism', 21: 'line3', 22: 'triangle6', 23: 'quad8', 24: 'tetra10', 25: 'hexahedron20', 26: 'wedge15', 27: 'pyramid13', 28: 'quad9', 29: 'hexahedron27', 30: 'quad6', 31: 'wedge12', 32: 'wedge18', 33: 'hexahedron24', 34: 'triangle7', 35: 'line4', # # 60: VTK_HIGHER_ORDER_EDGE, # 61: VTK_HIGHER_ORDER_TRIANGLE, # 62: VTK_HIGHER_ORDER_QUAD, # 63: VTK_HIGHER_ORDER_POLYGON, # 64: VTK_HIGHER_ORDER_TETRAHEDRON, # 65: VTK_HIGHER_ORDER_WEDGE, # 66: VTK_HIGHER_ORDER_PYRAMID, # 67: VTK_HIGHER_ORDER_HEXAHEDRON, } def write(filetype, filename, points, cells, point_data=None, cell_data=None, field_data=None ): # pylint: disable=import-error import vtk def _create_vtkarray(X, name): array = vtk.util.numpy_support.numpy_to_vtk(X, deep=1) array.SetName(name) return array point_data = {} if point_data is None else point_data cell_data = {} if cell_data is None else cell_data field_data = {} if field_data is None else field_data # assert data integrity for key in point_data: assert len(point_data[key]) == len(points), \ 'Point data mismatch.' for key in cell_data: assert key in cells, 'Cell data without cell' for key2 in cell_data[key]: assert len(cell_data[key][key2]) == len(cells[key]), \ 'Cell data mismatch.' vtk_mesh = _generate_vtk_mesh(points, cells) # add point data pd = vtk_mesh.GetPointData() for name, X in point_data.items(): # There is a naming inconsistency in VTK when it comes to multivectors # in Exodus files: # If a vector 'v' has two components, they are called 'v_x', 'v_y' # (note the underscore), if it has three, then they are called 'vx', # 'vy', 'vz'. See bug . # For VT{K,U} files, no underscore is ever added. pd.AddArray(_create_vtkarray(X, name)) # Add cell data. # The cell_data is structured like # # cell_type -> # key -> array # key -> array # [...] # cell_type -> # key -> array # key -> array # [...] # [...] # # VTK expects one array for each `key`, so assemble the keys across all # mesh_types. This requires each key to be present for each mesh_type, of # course. 
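    # For instance (the arrays are made up for illustration),
    #
    #   cell_data = {
    #       'triangle': {'pressure': numpy.array([1.0, 2.0])},
    #       'quad': {'pressure': numpy.array([3.0])},
    #       }
    #
    # must provide 'pressure' for every cell type; the code below then
    # concatenates the pieces into one VTK array of length 3.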
    all_keys = []
    for cell_type in cell_data:
        all_keys += cell_data[cell_type].keys()
    # create unified cell data
    for key in all_keys:
        for cell_type in cell_data:
            assert key in cell_data[cell_type]
    unified_cell_data = {
        key: numpy.concatenate([
            cell_data[cell_type][key]
            for cell_type in cell_data
            ])
        for key in all_keys
        }
    # add the array data to the mesh
    cd = vtk_mesh.GetCellData()
    for name, array in unified_cell_data.items():
        cd.AddArray(_create_vtkarray(array, name))

    # add field data
    fd = vtk_mesh.GetFieldData()
    for key, value in field_data.items():
        fd.AddArray(_create_vtkarray(value, key))

    if filetype == 'vtk-ascii':
        logging.warning('VTK ASCII files are only meant for debugging.')
        writer = vtk.vtkUnstructuredGridWriter()
        writer.SetFileTypeToASCII()
    elif filetype == 'vtk-binary':
        writer = vtk.vtkUnstructuredGridWriter()
        writer.SetFileTypeToBinary()
    elif filetype == 'vtu-ascii':
        logging.warning('VTU ASCII files are only meant for debugging.')
        writer = vtk.vtkXMLUnstructuredGridWriter()
        writer.SetDataModeToAscii()
    elif filetype == 'vtu-binary':
        writer = vtk.vtkXMLUnstructuredGridWriter()
        writer.SetDataModeToBinary()
    elif filetype == 'xdmf2':
        writer = vtk.vtkXdmfWriter()
    elif filetype == 'xdmf3':
        writer = vtk.vtkXdmf3Writer()
    else:
        assert filetype == 'exodus', \
            'Unknown file type \'{}\'.'.format(filename)
        writer = vtk.vtkExodusIIWriter()
        # if the mesh contains vtkmodeldata information, make use of it
        # and write out all time steps.
        writer.WriteAllTimeStepsOn()

    writer.SetFileName(filename)
    try:
        writer.SetInput(vtk_mesh)
    except AttributeError:
        writer.SetInputData(vtk_mesh)
    writer.Write()
    return


def _generate_vtk_mesh(points, cells):
    # pylint: disable=import-error
    import vtk
    from vtk.util import numpy_support

    mesh = vtk.vtkUnstructuredGrid()

    # set points
    vtk_points = vtk.vtkPoints()
    # Not using a deep copy here results in a segfault.
    vtk_array = numpy_support.numpy_to_vtk(points, deep=True)
    vtk_points.SetData(vtk_array)
    mesh.SetPoints(vtk_points)

    # Set cells.
    meshio_to_vtk_type = {y: x for x, y in vtk_to_meshio_type.items()}

    # create cell_array. It's a one-dimensional vector with
    # (num_points0, p0, p1, ... ,pk, numpoints1, p10, p11, ..., p1k, ...
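    # For example, two triangles followed by one quad would give (with i*
    # standing in for the actual point indices)
    #
    #   cell_types        = [5, 5, 9]
    #   cell_offsets      = [0, 4, 8]
    #   cell_connectivity = [3, i0, i1, i2,  3, i3, i4, i5,  4, i6, i7, i8, i9]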
cell_types = [] cell_offsets = [] cell_connectivity = [] len_array = 0 for meshio_type, data in cells.items(): numcells, num_local_nodes = data.shape vtk_type = meshio_to_vtk_type[meshio_type] # add cell types cell_types.append(numpy.empty(numcells, dtype=numpy.ubyte)) cell_types[-1].fill(vtk_type) # add cell offsets cell_offsets.append(numpy.arange( len_array, len_array + numcells * (num_local_nodes + 1), num_local_nodes + 1, dtype=numpy.int64 )) cell_connectivity.append( numpy.c_[ num_local_nodes * numpy.ones(numcells, dtype=data.dtype), data ].flatten() ) len_array += len(cell_connectivity[-1]) cell_types = numpy.concatenate(cell_types) cell_offsets = numpy.concatenate(cell_offsets) cell_connectivity = numpy.concatenate(cell_connectivity) connectivity = vtk.util.numpy_support.numpy_to_vtkIdTypeArray( cell_connectivity.astype(numpy.int64), deep=1 ) # wrap the data into a vtkCellArray cell_array = vtk.vtkCellArray() cell_array.SetCells(len(cell_types), connectivity) # Add cell data to the mesh mesh.SetCells( numpy_support.numpy_to_vtk( cell_types, deep=1, array_type=vtk.vtkUnsignedCharArray().GetDataType() ), numpy_support.numpy_to_vtk( cell_offsets, deep=1, array_type=vtk.vtkIdTypeArray().GetDataType() ), cell_array ) return mesh meshio-1.11.7/test/test_ansys.py000066400000000000000000000010711323505575200166200ustar00rootroot00000000000000# -*- coding: utf-8 -*- # import pytest import meshio import helpers @pytest.mark.parametrize('mesh', [ helpers.tri_mesh, helpers.quad_mesh, helpers.tri_quad_mesh, helpers.tet_mesh, helpers.hex_mesh, ]) @pytest.mark.parametrize('write_binary', [False, True]) def test(mesh, write_binary): def writer(*args, **kwargs): return meshio.ansys_io.write( *args, write_binary=write_binary, **kwargs ) helpers.write_read(writer, meshio.ansys_io.read, mesh, 1.0e-15) return meshio-1.11.7/test/test_dolfin.py000066400000000000000000000006041323505575200167370ustar00rootroot00000000000000# -*- coding: utf-8 -*- # import pytest import meshio import helpers @pytest.mark.parametrize('mesh', [ helpers.tri_mesh, helpers.tet_mesh, helpers.add_cell_data(helpers.tri_mesh, 1), ]) def test_io(mesh): helpers.write_read( meshio.dolfin_io.write, meshio.dolfin_io.read, mesh, 1.0e-15 ) return meshio-1.11.7/test/test_exodus.py000066400000000000000000000026641323505575200170030ustar00rootroot00000000000000# -*- coding: utf-8 -*- # import pytest import meshio import helpers import legacy_reader import legacy_writer vtk = pytest.importorskip('vtk') test_set = [ helpers.tri_mesh, helpers.triangle6_mesh, helpers.quad_mesh, helpers.quad8_mesh, helpers.tri_quad_mesh, helpers.tet_mesh, helpers.tet10_mesh, helpers.hex_mesh, helpers.hex20_mesh, helpers.add_point_data(helpers.tri_mesh, 1), # helpers.add_point_data(helpers.tri_mesh, 2), # helpers.add_point_data(helpers.tri_mesh, 3), ] @pytest.mark.parametrize('mesh', test_set) def test_io(mesh): helpers.write_read( meshio.exodus_io.write, meshio.exodus_io.read, mesh, 1.0e-15 ) return @pytest.mark.parametrize('mesh', [ helpers.tri_mesh, helpers.quad_mesh, helpers.tri_quad_mesh, helpers.tet_mesh, helpers.hex_mesh, ]) def test_legacy_writer(mesh): # test with legacy writer def lw(*args, **kwargs): return legacy_writer.write('exodus', *args, **kwargs) # The legacy writer only writes with low precision. 
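    # (`lw` fixes the file type and forwards everything else; write_read in
    # helpers.py then calls it as
    # lw(filename, points, cells, point_data=..., cell_data=..., field_data=...).)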
helpers.write_read(lw, meshio.exodus_io.read, mesh, 1.0e-15) return @pytest.mark.parametrize('mesh', [ helpers.tri_mesh, helpers.hex_mesh, ]) def test_legacy_reader(mesh): def lr(filename): return legacy_reader.read('exodus', filename) helpers.write_read(meshio.exodus_io.write, lr, mesh, 1.0e-4) return meshio-1.11.7/test/test_gmsh.py000066400000000000000000000020721323505575200164230ustar00rootroot00000000000000# -*- coding: utf-8 -*- # import pytest import meshio import helpers @pytest.mark.parametrize('mesh', [ helpers.tri_mesh, helpers.triangle6_mesh, helpers.quad_mesh, helpers.quad8_mesh, helpers.tri_quad_mesh, helpers.tet_mesh, helpers.tet10_mesh, helpers.hex_mesh, helpers.hex20_mesh, helpers.add_point_data(helpers.tri_mesh, 1), helpers.add_point_data(helpers.tri_mesh, 3), helpers.add_point_data(helpers.tri_mesh, 9), helpers.add_cell_data(helpers.tri_mesh, 1), helpers.add_cell_data(helpers.tri_mesh, 3), helpers.add_cell_data(helpers.tri_mesh, 9), helpers.add_field_data(helpers.tri_mesh, [1, 2], int), helpers.add_field_data(helpers.tet_mesh, [1, 3], int), helpers.add_field_data(helpers.hex_mesh, [1, 3], int), ]) @pytest.mark.parametrize('write_binary', [False, True]) def test_gmsh(mesh, write_binary): def writer(*args, **kwargs): return meshio.gmsh_io.write(*args, write_binary=write_binary, **kwargs) helpers.write_read(writer, meshio.gmsh_io.read, mesh, 1.0e-15) return meshio-1.11.7/test/test_helpers.py000066400000000000000000000012711323505575200171270ustar00rootroot00000000000000# -*- coding: utf-8 -*- # import pytest import meshio import helpers @pytest.mark.parametrize('filename', [ 'test.e', 'test.med', 'test.mesh', 'test.msh', 'test.xml', 'test.post', 'test.h5m', 'test.off', 'test.vtk', 'test.vtu', 'test.xmf', ]) def test_generic_io(filename): meshio.write( filename, helpers.tri_mesh['points'], helpers.tri_mesh['cells'], ) points, cells, _, _, _ = meshio.helpers.read(filename) assert (abs(points - helpers.tri_mesh['points']) < 1.0e-15).all() assert ( helpers.tri_mesh['cells']['triangle'] == cells['triangle'] ).all() return meshio-1.11.7/test/test_med.py000066400000000000000000000013231323505575200162300ustar00rootroot00000000000000# -*- coding: utf-8 -*- # import pytest import meshio import helpers h5py = pytest.importorskip('h5py') @pytest.mark.parametrize('mesh', [ helpers.tri_mesh, helpers.quad_mesh, helpers.tet_mesh, helpers.hex_mesh, helpers.add_point_data(helpers.tri_mesh, 1), helpers.add_point_data(helpers.tri_mesh, 2), helpers.add_point_data(helpers.tri_mesh, 3), helpers.add_point_data(helpers.hex_mesh, 3), helpers.add_cell_data(helpers.tri_mesh, 1), helpers.add_cell_data(helpers.tri_mesh, 2), helpers.add_cell_data(helpers.tri_mesh, 3), ]) def test_io(mesh): helpers.write_read( meshio.med_io.write, meshio.med_io.read, mesh, 1.0e-15 ) return meshio-1.11.7/test/test_medit.py000066400000000000000000000006101323505575200165630ustar00rootroot00000000000000# -*- coding: utf-8 -*- # import pytest import meshio import helpers @pytest.mark.parametrize('mesh', [ helpers.tri_mesh, helpers.quad_mesh, helpers.tri_quad_mesh, helpers.tet_mesh, ]) def test_io(mesh): helpers.write_read( meshio.medit_io.write, meshio.medit_io.read, mesh, 1.0e-15 ) return meshio-1.11.7/test/test_moab.py000066400000000000000000000005361323505575200164060ustar00rootroot00000000000000# -*- coding: utf-8 -*- # import pytest import meshio import helpers h5py = pytest.importorskip('h5py') @pytest.mark.parametrize('mesh', [ helpers.tri_mesh, helpers.tet_mesh, ]) def test_io(mesh): helpers.write_read( 
meshio.h5m_io.write, meshio.h5m_io.read, mesh, 1.0e-15 ) return meshio-1.11.7/test/test_off.py000066400000000000000000000004271323505575200162410ustar00rootroot00000000000000# -*- coding: utf-8 -*- # import pytest import meshio import helpers @pytest.mark.parametrize('mesh', [ helpers.tri_mesh ]) def test_io(mesh): helpers.write_read( meshio.off_io.write, meshio.off_io.read, mesh, 1.0e-15 ) return meshio-1.11.7/test/test_permas.py000066400000000000000000000005461323505575200167600ustar00rootroot00000000000000# -*- coding: utf-8 -*- # import pytest import meshio import helpers @pytest.mark.parametrize('mesh', [ helpers.tri_mesh, helpers.quad_mesh, helpers.tri_quad_mesh, helpers.tet_mesh, ]) def test_io(mesh): helpers.write_read( meshio.permas_io.write, meshio.permas_io.read, mesh, 1.0e-15 ) return meshio-1.11.7/test/test_stl.py000066400000000000000000000007761323505575200163000ustar00rootroot00000000000000# -*- coding: utf-8 -*- # import pytest import meshio import helpers @pytest.mark.parametrize('mesh', [ helpers.tri_mesh, ]) @pytest.mark.parametrize('write_binary, tol', [ (False, 1.0e-15), # binary STL only operates in single precision (True, 1.0e-8), ]) def test_stl(mesh, write_binary, tol): def writer(*args, **kwargs): return meshio.stl_io.write(*args, write_binary=write_binary, **kwargs) helpers.write_read(writer, meshio.stl_io.read, mesh, tol) return meshio-1.11.7/test/test_vtk.py000066400000000000000000000037271323505575200163010ustar00rootroot00000000000000# -*- coding: utf-8 -*- # import pytest import meshio import helpers import legacy_reader import legacy_writer vtk = pytest.importorskip('vtk') test_set = [ helpers.tri_mesh, helpers.triangle6_mesh, helpers.quad_mesh, helpers.quad8_mesh, helpers.tri_quad_mesh, helpers.tet_mesh, helpers.tet10_mesh, helpers.hex_mesh, helpers.hex20_mesh, helpers.add_point_data(helpers.tri_mesh, 1), helpers.add_point_data(helpers.tri_mesh, 2), helpers.add_point_data(helpers.tri_mesh, 3), helpers.add_cell_data(helpers.tri_mesh, 1), helpers.add_cell_data(helpers.tri_mesh, 2), helpers.add_cell_data(helpers.tri_mesh, 3), ] @pytest.mark.parametrize('mesh', test_set) @pytest.mark.parametrize('write_binary', [True, False]) def test(mesh, write_binary): def writer(*args, **kwargs): return meshio.vtk_io.write(*args, write_binary=write_binary, **kwargs) helpers.write_read(writer, meshio.vtk_io.read, mesh, 1.0e-15) return @pytest.mark.parametrize('mesh', test_set) @pytest.mark.parametrize('write_binary', [True, False]) def test_legacy_writer(mesh, write_binary): # test with legacy writer def lw(*args, **kwargs): mode = 'vtk-binary' if write_binary else 'vtk-ascii' return legacy_writer.write(mode, *args, **kwargs) # The legacy writer only writes with low precision. 
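    # Hence the relaxed tolerance below: legacy ASCII output keeps fewer
    # significant digits than full IEEE double precision.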
helpers.write_read(lw, meshio.vtk_io.read, mesh, 1.0e-11) return @pytest.mark.parametrize('mesh', test_set) @pytest.mark.parametrize('write_binary', [True, False]) def test_legacy_reader(mesh, write_binary): def writer(*args, **kwargs): return meshio.vtk_io.write(*args, write_binary=write_binary, **kwargs) # test with legacy reader def lr(filename): mode = 'vtk-binary' if write_binary else 'vtk-ascii' return legacy_reader.read(mode, filename) helpers.write_read(writer, lr, mesh, 1.0e-15) return if __name__ == '__main__': test(helpers.tri_mesh, write_binary=True) meshio-1.11.7/test/test_vtu.py000066400000000000000000000047011323505575200163040ustar00rootroot00000000000000# -*- coding: utf-8 -*- # import pytest import meshio import helpers import legacy_reader import legacy_writer vtk = pytest.importorskip('vtk') test_set = [ helpers.tri_mesh, helpers.triangle6_mesh, helpers.quad_mesh, helpers.quad8_mesh, helpers.tri_quad_mesh, helpers.tet_mesh, helpers.tet10_mesh, helpers.hex_mesh, helpers.hex20_mesh, helpers.add_point_data(helpers.tri_mesh, 1), helpers.add_point_data(helpers.tri_mesh, 2), helpers.add_point_data(helpers.tri_mesh, 3), helpers.add_cell_data(helpers.tri_mesh, 1), helpers.add_cell_data(helpers.tri_mesh, 2), helpers.add_cell_data(helpers.tri_mesh, 3), ] @pytest.mark.parametrize('mesh', test_set) @pytest.mark.parametrize('write_binary', [False, True]) def test(mesh, write_binary): def writer(*args, **kwargs): return meshio.vtu_io.write( *args, write_binary=write_binary, # don't use pretty xml to increase test coverage pretty_xml=False, **kwargs ) # ASCII files are only meant for debugging, VTK stores only 11 digits # tol = 1.0e-15 if write_binary else 1.0e-11 helpers.write_read(writer, meshio.vtu_io.read, mesh, tol) return @pytest.mark.parametrize('mesh', test_set) @pytest.mark.parametrize('write_binary', [False, True]) def test_legacy_writer(mesh, write_binary): # test with legacy writer def lw(*args, **kwargs): mode = 'vtu-binary' if write_binary else 'vtu-ascii' return legacy_writer.write(mode, *args, **kwargs) # The legacy writer only writes with low precision. 
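    # Binary output round-trips at machine precision; ASCII output is
    # truncated, hence the two tolerances: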
tol = 1.0e-15 if write_binary else 1.0e-11 helpers.write_read(lw, meshio.vtu_io.read, mesh, tol) return @pytest.mark.parametrize('mesh', test_set) @pytest.mark.parametrize('write_binary', [False, True]) def test_legacy_reader(mesh, write_binary): def writer(*args, **kwargs): return meshio.vtu_io.write(*args, write_binary=write_binary, **kwargs) # test with legacy reader def lr(filename): mode = 'vtu-binary' if write_binary else 'vtu-ascii' return legacy_reader.read(mode, filename) # the legacy reader only reads at low precision tol = 1.0e-15 if write_binary else 1.0e-11 helpers.write_read(writer, lr, mesh, tol) return if __name__ == '__main__': test(helpers.tet10_mesh, write_binary=False) meshio-1.11.7/test/test_xdmf.py000066400000000000000000000051621323505575200164260ustar00rootroot00000000000000# -*- coding: utf-8 -*- # import pytest import meshio import helpers import legacy_reader import legacy_writer vtk = pytest.importorskip('vtk') test_set_full = [ helpers.tri_mesh, helpers.triangle6_mesh, helpers.quad_mesh, helpers.quad8_mesh, helpers.tri_quad_mesh, helpers.tet_mesh, helpers.tet10_mesh, helpers.hex_mesh, helpers.hex20_mesh, helpers.add_point_data(helpers.tri_mesh, 1), helpers.add_cell_data(helpers.tri_mesh, 1), ] test_set_reduced = [ helpers.tri_mesh, helpers.quad_mesh, helpers.tri_quad_mesh, helpers.tet_mesh, helpers.hex_mesh, helpers.add_point_data(helpers.tri_mesh, 1), helpers.add_cell_data(helpers.tri_mesh, 1), ] @pytest.mark.parametrize('mesh', test_set_full) @pytest.mark.parametrize('data_format', ['XML', 'Binary', 'HDF']) def test_xdmf3(mesh, data_format): def write(*args, **kwargs): return meshio.xdmf_io.write(*args, data_format=data_format, **kwargs) helpers.write_read( write, meshio.xdmf_io.read, mesh, 1.0e-15 ) return @pytest.mark.skipif(not hasattr(vtk, 'vtkXdmf3Writer'), reason='Need XDMF3') @pytest.mark.parametrize('mesh', test_set_reduced) def test_xdmf3_legacy_writer(mesh): # test with legacy writer def lw(*args, **kwargs): return legacy_writer.write('xdmf3', *args, **kwargs) helpers.write_read( lw, meshio.xdmf_io.read, mesh, 1.0e-15 ) return @pytest.mark.skipif(not hasattr(vtk, 'vtkXdmf3Reader'), reason='Need XDMF3') @pytest.mark.parametrize('mesh', test_set_reduced) def test_xdmf3_legacy_reader(mesh): # test with legacy reader def lr(filename): return legacy_reader.read('xdmf3', filename) helpers.write_read( meshio.xdmf_io.write, lr, mesh, 1.0e-15 ) return @pytest.mark.skipif(not hasattr(vtk, 'vtkXdmfWriter'), reason='Need XDMF3') @pytest.mark.parametrize('mesh', [ helpers.tri_mesh, helpers.quad_mesh, helpers.tet_mesh, helpers.hex_mesh, helpers.add_point_data(helpers.tri_mesh, 1), helpers.add_cell_data(helpers.tri_mesh, 1), ]) def test_xdmf2_legacy_writer(mesh): # test with legacy writer def lw(*args, **kwargs): return legacy_writer.write('xdmf2', *args, **kwargs) helpers.write_read( lw, meshio.xdmf_io.read, # The legacy writer stores data in only single precision # mesh, 1.0e-6 ) return if __name__ == '__main__': test_xdmf3_legacy_writer(helpers.tri_mesh) meshio-1.11.7/tools/000077500000000000000000000000001323505575200142345ustar00rootroot00000000000000meshio-1.11.7/tools/meshio-convert000077500000000000000000000067631323505575200171400ustar00rootroot00000000000000#! /usr/bin/env python3 ''' Convert a mesh file to another. ''' from __future__ import print_function import numpy import meshio def _main(): # Parse command line arguments. 
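    # (See _parse_options below; e.g. `meshio-convert --prune in.msh out.vtu`
    # with hypothetical file names yields args.infile='in.msh',
    # args.outfile='out.vtu', args.prune=True.)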
args = _parse_options() # read mesh data points, cells, point_data, cell_data, field_data = \ meshio.read(args.infile, file_format=args.input_format) print('Number of points: {}'.format(len(points))) print('Elements:') for tpe, elems in cells.items(): print(' Number of {}s: {}'.format(tpe, len(elems))) if point_data: print('Point data: {}'.format(', '.join(point_data.keys()))) cell_data_keys = set() for cell_type in cell_data: cell_data_keys = cell_data_keys.union(cell_data[cell_type].keys()) if cell_data_keys: print('Cell data: {}'.format(', '.join(cell_data_keys))) if args.prune: cells.pop('vertex', None) cells.pop('line', None) if 'tetra' in cells: # remove_lower_order_cells cells.pop('triangle', None) # remove_orphaned_nodes. # find which nodes are not mentioned in the cells and remove them flat_cells = cells['tetra'].flatten() orphaned_nodes = numpy.setdiff1d(numpy.arange(len(points)), flat_cells) points = numpy.delete(points, orphaned_nodes, axis=0) # also adapt the point data for key in point_data: point_data[key] = numpy.delete( point_data[key], orphaned_nodes, axis=0 ) # reset GLOBAL_ID if 'GLOBAL_ID' in point_data: point_data['GLOBAL_ID'] = numpy.arange(1, len(points)+1) # We now need to adapt the cells too. diff = numpy.zeros(len(flat_cells), dtype=flat_cells.dtype) for orphan in orphaned_nodes: diff[numpy.argwhere(flat_cells > orphan)] += 1 flat_cells -= diff cells['tetra'] = flat_cells.reshape(cells['tetra'].shape) # Some converters (like VTK) require `points` to be contiguous. points = numpy.ascontiguousarray(points) # write it out meshio.write( args.outfile, points, cells, file_format=args.output_format, point_data=point_data, cell_data=cell_data, field_data=field_data ) return def _parse_options(): '''Parse input options.''' import argparse parser = argparse.ArgumentParser( description=( 'Convert between mesh formats.' ) ) parser.add_argument( 'infile', type=str, help='mesh file to be read from' ) parser.add_argument( '--input-format', '-i', type=str, choices=meshio.input_filetypes, help='input file format', default=None ) parser.add_argument( '--output-format', '-o', type=str, choices=meshio.output_filetypes, help='output file format', default=None ) parser.add_argument( 'outfile', type=str, help='mesh file to be written to' ) parser.add_argument( '--prune', '-p', action='store_true', help='remove lower order cells, remove orphaned nodes' ) parser.add_argument( '--version', '-v', action='version', version='%(prog)s ' + ('(version %s)' % meshio.__version__) ) return parser.parse_args() if __name__ == '__main__': _main()
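# For reference, the command-line conversion above boils down to the Python
# calls (file names are examples):
#
#   import meshio
#   points, cells, point_data, cell_data, field_data = meshio.read('in.msh')
#   meshio.write(
#       'out.vtu', points, cells,
#       point_data=point_data, cell_data=cell_data, field_data=field_data
#       )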