# ===== nxtomo-1.3.0.dev9/LICENSE =====

The goal of the nxtomo library is to provide a powerful Python interface to read / write the nexus NXtomo application.

nxtomo is distributed under the MIT license. The MIT license follows:

Copyright (c) European Synchrotron Radiation Facility (ESRF)

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

# ===== nxtomo-1.3.0.dev9/PKG-INFO =====

Metadata-Version: 2.1
Name: nxtomo
Version: 1.3.0.dev9
Summary: module to create / edit NXtomo application
Author-email: Henri Payno, Pierre Paleo, Alessandro Mirone, Jérôme Lesaint, Pierre-Olivier Autran
License: MIT (the full license text is reproduced in the LICENSE file above)
Project-URL: Homepage, https://gitlab.esrf.fr/tomotools/nxtomo
Project-URL: Documentation, https://gitlab.esrf.fr/tomotools/nxtomo/pages
Project-URL: Repository, https://gitlab.esrf.fr/tomotools/nxtomo
Project-URL: Changelog, https://gitlab.esrf.fr/tomotools/nxtomo/-/blob/master/CHANGELOG.md
Keywords: NXtomo,nexus,tomography,tomotools,esrf
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: Science/Research
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Environment :: Console
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: Unix
Classifier: Operating System :: MacOS :: MacOS X
Classifier: Operating System :: POSIX
Classifier: Topic :: Scientific/Engineering :: Physics
Classifier: Topic :: Scientific/Engineering :: Medical Science Apps.
Requires-Python: >=3.7
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: numpy<2.0
Requires-Dist: h5py>=3.0
Requires-Dist: silx>=2.0
Requires-Dist: pyunitsystem>=2.0.0a
Requires-Dist: packaging
Provides-Extra: test
Requires-Dist: pytest; extra == "test"
Provides-Extra: doc
Requires-Dist: Sphinx<5.2.0,>=4.0.0; extra == "doc"
Requires-Dist: nbsphinx; extra == "doc"
Requires-Dist: jupyterlab; extra == "doc"
Requires-Dist: ipykernel; extra == "doc"
Requires-Dist: nbconvert; extra == "doc"
Requires-Dist: pandoc; extra == "doc"
Requires-Dist: scikit-image; extra == "doc"
Requires-Dist: h5glance; extra == "doc"
Requires-Dist: jupyter_client; extra == "doc"
Requires-Dist: pydata_sphinx_theme; extra == "doc"
Requires-Dist: sphinx_autodoc_typehints; extra == "doc"

# nxtomo

The goal of this project is to provide a powerful and user-friendly API to create and edit [NXtomo](https://manual.nexusformat.org/classes/applications/NXtomo.html) applications.

The latest documentation is available at https://tomotools.gitlab-pages.esrf.fr/nxtomo

Tutorials are available here: https://tomotools.gitlab-pages.esrf.fr/nxtomo/

# ===== nxtomo-1.3.0.dev9/README.md =====

# nxtomo

The goal of this project is to provide a powerful and user-friendly API to create and edit [NXtomo](https://manual.nexusformat.org/classes/applications/NXtomo.html) applications.

The latest documentation is available at https://tomotools.gitlab-pages.esrf.fr/nxtomo

Tutorials are available here: https://tomotools.gitlab-pages.esrf.fr/nxtomo/
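A minimal usage sketch (the file name and array contents below are illustrative; the calls shown — `NXtomo`, `ImageKey`, `save` — are the public API exercised by the package's own test suite):

```python
import numpy
from nxtomo import NXtomo
from nxtomo.nxobject.nxdetector import ImageKey

nx = NXtomo()
nx.energy = 12.3  # stored in keV by default
nx.instrument.detector.data = numpy.random.random((4, 10, 10))  # 4 frames of 10x10
nx.instrument.detector.image_key_control = [ImageKey.PROJECTION] * 4
nx.sample.rotation_angle = numpy.linspace(0, 180, num=4, endpoint=False)
nx.save(file_path="my_scan.nx", data_path="entry0000", overwrite=True)
```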
# ===== nxtomo-1.3.0.dev9/doc/conf.py =====

# -- Project information -----------------------------------------------------

project = "nxtomo"
copyright = "2023, ESRF"
author = "P.Paleo, H.Payno, A.Mirone, J.Lesaint"

# The full version, including alpha/beta/rc tags
release = "1.3-dev"
version = release

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.autosectionlabel",
    "sphinx.ext.intersphinx",
    "sphinx.ext.viewcode",
    "sphinx.ext.doctest",
    "sphinx.ext.inheritance_diagram",
    "sphinx.ext.autosummary",
    "nbsphinx",
    "sphinx_autodoc_typehints",
]

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = "pydata_sphinx_theme"

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"

html_logo = "img/nxtomo.png"

# autosummary options
autosummary_generate = True
autosummary_imported_members = True

autodoc_default_flags = [
    "members",
    "undoc-members",
    "show-inheritance",
]

html_theme_options = {
    "icon_links": [
        {
            "name": "pypi",
            "url": "https://pypi.org/project/nxtomo",
            "icon": "_static/navbar_icons/pypi.svg",
            "type": "local",
        },
        {
            "name": "gitlab",
            "url": "https://gitlab.esrf.fr/tomotools/nxtomo",
            "icon": "_static/navbar_icons/gitlab.svg",
            "type": "local",
        },
    ],
    "show_toc_level": 1,
    "navbar_align": "left",
    "show_version_warning_banner": True,
    "navbar_start": ["navbar-logo", "version"],
    "navbar_center": ["navbar-nav"],
    "footer_start": ["copyright"],
    "footer_center": ["sphinx-version"],
}
""" from nxtomo.version import version as __version__ # noqa F401 from .application.nxtomo import NXtomo # noqa F401 ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1729922990.4917538 nxtomo-1.3.0.dev9/nxtomo/application/0000755000175000017500000000000014707103656016643 5ustar00paynopayno././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1727757728.0 nxtomo-1.3.0.dev9/nxtomo/application/nxtomo.py0000644000175000017500000007713014676676640020566 0ustar00paynopayno"""Define NXtomo application and related functions and classes""" from __future__ import annotations import logging import os from copy import deepcopy from datetime import datetime from functools import partial from operator import is_not import h5py import numpy from silx.io.url import DataUrl from silx.utils.proxy import docstring from silx.io.utils import open as hdf5_open from pyunitsystem.energysystem import EnergySI from nxtomo.paths.nxtomo import LATEST_VERSION as LATEST_NXTOMO_VERSION from nxtomo.paths.nxtomo import get_paths as get_nexus_paths from nxtomo.nxobject.nxmonitor import NXmonitor from nxtomo.nxobject.nxdetector import ImageKey from nxtomo.nxobject.nxinstrument import NXinstrument from nxtomo.nxobject.nxobject import ElementWithUnit, NXobject from nxtomo.nxobject.nxsample import NXsample from nxtomo.utils import get_data, get_data_and_unit from nxtomo.utils.io import deprecated_warning _logger = logging.getLogger(__name__) __all__ = ["NXtomo", "copy_nxtomo_file"] class NXtomo(NXobject): """ Class defining an NXTomo. His final goal is to save data to disk. :param node_name: node_name is used by the NXobject parent to order children when dumping it to file. has NXtomo is expected to be the highest object in the hierachy. node_name will only be used for saving if no `data_path` is provided when calling 'save' function. :param parent: parent of this NXobject. Most likely None for NXtomo """ def __init__(self, node_name: str = "", parent: NXobject | None = None) -> None: if node_name not in (None, ""): deprecated_warning( type_="parameter", name="node_name", replacement="None - has been removed", since_version="1.1", reason="node_name has been removed to simplify usage. Location must be provided directly in the 'save' function using the 'data_path' parameter", ) super().__init__(node_name="", parent=parent) self._set_freeze(False) self._start_time = None self._end_time = None self._instrument = NXinstrument(node_name="instrument", parent=self) self._sample = NXsample(node_name="sample", parent=self) self._control = NXmonitor(node_name="control", parent=self) self._group_size = None self._bliss_original_files = None # warning: output will be different if set to None (dataset not exported) or an empty tuple (exported but empty) self._energy = ElementWithUnit( default_unit=EnergySI.KILOELECTRONVOLT ) # energy in kev self._title = None self._set_freeze(True) @property def start_time(self) -> datetime | str | None: return self._start_time @start_time.setter def start_time(self, start_time: datetime | str | None): if not isinstance(start_time, (type(None), datetime, str)): raise TypeError( f"start_time is expected ot be an instance of datetime or None. 
    @property
    def start_time(self) -> datetime | str | None:
        return self._start_time

    @start_time.setter
    def start_time(self, start_time: datetime | str | None):
        if not isinstance(start_time, (type(None), datetime, str)):
            raise TypeError(
                f"start_time is expected to be an instance of datetime or None. Not {type(start_time)}"
            )
        self._start_time = start_time

    @property
    def end_time(self) -> datetime | str | None:
        return self._end_time

    @end_time.setter
    def end_time(self, end_time: datetime | str | None):
        if not isinstance(end_time, (type(None), datetime, str)):
            raise TypeError(
                f"end_time is expected to be an instance of datetime or None. Not {type(end_time)}"
            )
        self._end_time = end_time

    @property
    def title(self) -> str | None:
        return self._title

    @title.setter
    def title(self, title: str | None):
        if isinstance(title, numpy.ndarray):
            # handle diamond use case
            title = str(title)
        elif not isinstance(title, (type(None), str)):
            raise TypeError(
                f"title is expected to be an instance of str or None. Not {type(title)}"
            )
        self._title = title

    @property
    def instrument(self) -> NXinstrument | None:
        return self._instrument

    @instrument.setter
    def instrument(self, instrument: NXinstrument | None) -> None:
        if not isinstance(instrument, (type(None), NXinstrument)):
            raise TypeError(
                f"instrument is expected to be an instance of {NXinstrument} or None. Not {type(instrument)}"
            )
        self._instrument = instrument

    @property
    def sample(self) -> NXsample | None:
        return self._sample

    @sample.setter
    def sample(self, sample: NXsample | None):
        if not isinstance(sample, (type(None), NXsample)):
            raise TypeError(
                f"sample is expected to be an instance of {NXsample} or None. Not {type(sample)}"
            )
        self._sample = sample

    @property
    def control(self) -> NXmonitor | None:
        return self._control

    @control.setter
    def control(self, control: NXmonitor | None) -> None:
        if not isinstance(control, (type(None), NXmonitor)):
            raise TypeError(
                f"control is expected to be an instance of {NXmonitor} or None. Not {type(control)}"
            )
        self._control = control

    @property
    def energy(self) -> float | None:
        """
        incident energy in keV
        """
        return self._energy

    @energy.setter
    def energy(self, energy: float | None) -> None:
        if not isinstance(energy, (type(None), float)):
            raise TypeError(
                f"energy is expected to be an instance of {float} or None. Not {type(energy)}"
            )
        self._energy.value = energy

    @property
    def group_size(self) -> int | None:
        return self._group_size

    @group_size.setter
    def group_size(self, group_size: int | None):
        if not (
            isinstance(group_size, (type(None), int))
            or (numpy.isscalar(group_size) and not isinstance(group_size, (str, bytes)))
        ):
            raise TypeError(
                f"group_size is expected to be None or a scalar. Not {type(group_size)}"
            )
        self._group_size = group_size

    @property
    def bliss_original_files(self) -> tuple | None:
        return self._bliss_original_files

    @bliss_original_files.setter
    def bliss_original_files(self, files: tuple | numpy.ndarray | None):
        if isinstance(files, numpy.ndarray):
            files = tuple(files)
        if not isinstance(files, (type(None), tuple)):
            raise TypeError(
                f"files is expected to be None or a tuple. {type(files)} provided instead"
            )
        self._bliss_original_files = files
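    # Hedged illustration (not part of the original module): every setter above
    # type-checks its input, so invalid assignments fail early. Assuming an
    # instance `nx = NXtomo()`:
    #
    #     nx.energy = 12.3        # ok, stored in keV through ElementWithUnit
    #     nx.title = "my scan"    # ok
    #     nx.energy = "12.3"      # raises TypeError (str is not accepted)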
    @docstring(NXobject)
    def to_nx_dict(
        self,
        nexus_path_version: float | None = None,
        data_path: str | None = None,
    ) -> dict:
        if data_path is None:
            data_path = ""
        nexus_paths = get_nexus_paths(nexus_path_version)
        nx_dict = {}

        if self.sample is not None:
            nx_dict.update(
                self.sample.to_nx_dict(nexus_path_version=nexus_path_version)
            )
        else:
            _logger.info("no sample found. Won't be saved")
        if self.instrument is not None:
            nx_dict.update(
                self.instrument.to_nx_dict(nexus_path_version=nexus_path_version)
            )
        else:
            _logger.info("no instrument found. Won't be saved")
        if self.control is not None:
            nx_dict.update(
                self.control.to_nx_dict(nexus_path_version=nexus_path_version)
            )
        else:
            _logger.info("no control found. Won't be saved")

        if self.start_time is not None:
            path_start_time = f"{self.path}/{nexus_paths.START_TIME_PATH}"
            if isinstance(self.start_time, datetime):
                start_time = self.start_time.isoformat()
            else:
                start_time = self.start_time
            nx_dict[path_start_time] = start_time
        if self.end_time is not None:
            path_end_time = f"{self.path}/{nexus_paths.END_TIME_PATH}"
            if isinstance(self.end_time, datetime):
                end_time = self.end_time.isoformat()
            else:
                end_time = self.end_time
            nx_dict[path_end_time] = end_time
        if self.group_size is not None:
            path_grp_size = f"{self.path}/{nexus_paths.GRP_SIZE_ATTR}"
            nx_dict[path_grp_size] = self.group_size
        if self.energy.value is not None:
            path_energy = f"{self.path}/{nexus_paths.ENERGY_PATH}"
            nx_dict[path_energy] = self.energy.value
            nx_dict["@".join([path_energy, "units"])] = str(self.energy.unit)
            path_beam = f"{self.path}/{nexus_paths.BEAM_PATH}"
            nx_dict["@".join([path_beam, "NX_class"])] = "NXbeam"
            if nexus_paths.VERSION > 1.0:
                nx_dict[f">/{self.path}/beam/incident_energy"] = (
                    f"/{data_path}/{self.path}/{nexus_paths.ENERGY_PATH}"
                )
        if self.title is not None:
            path_title = f"{self.path}/{nexus_paths.NAME_PATH}"
            nx_dict[path_title] = self.title
        if self.bliss_original_files is not None:
            nx_dict[f"/{self.path}/bliss_original_files"] = self.bliss_original_files

        # create data group from symbolic links
        if self.instrument.detector.image_key is not None:
            nx_dict[f">/{self.path}/data/image_key"] = (
                f"/{data_path}/{self.instrument.detector.path}/{nexus_paths.nx_detector_paths.IMAGE_KEY}"
            )
            nx_dict[f">/{self.path}/data/image_key_control"] = (
                f"/{data_path}/{self.instrument.detector.path}/{nexus_paths.nx_detector_paths.IMAGE_KEY_CONTROL}"
            )
        if self.instrument.detector.data is not None:
            nx_dict[f">/{self.path}/data/data"] = (
                f"/{data_path}/{self.instrument.detector.path}/{nexus_paths.nx_detector_paths.DATA}"
            )
            nx_dict[f"/{self.path}/data@NX_class"] = "NXdata"
            nx_dict[f"/{self.path}/data@signal"] = "data"
            nx_dict[f"/{self.path}@default"] = "data"
            nx_dict[f"{self.path}/data@SILX_style/axis_scale_types"] = [
                "linear",
                "linear",
            ]
        if self.sample.rotation_angle is not None:
            nx_dict[f">/{self.path}/data/rotation_angle"] = (
                f"/{data_path}/{self.sample.path}/{nexus_paths.nx_sample_paths.ROTATION_ANGLE}"
            )

        if nx_dict != {}:
            nx_dict[f"{self.path}@NX_class"] = "NXentry"
            nx_dict[f"{self.path}@definition"] = "NXtomo"
            nx_dict[f"{self.path}/definition"] = "NXtomo"
            nx_dict[f"{self.path}@version"] = nexus_paths.VERSION

        return nx_dict

    def detector_data_is_defined_by_url(self) -> bool:
        return self._detector_data_is_defined_by_type(DataUrl)

    def detector_data_is_defined_by_virtual_source(self) -> bool:
        return self._detector_data_is_defined_by_type(h5py.VirtualSource)

    def _detector_data_is_defined_by_type(self, type_):
        return (
            self.instrument is not None
            and self.instrument.detector is not None
            and self.instrument.detector.data is not None
            and isinstance(self.instrument.detector.data, (str, tuple))
            and isinstance(self.instrument.detector.data[0], type_)
        )
    def load(
        self, file_path: str, data_path: str, detector_data_as="as_data_url"
    ) -> NXobject:
        """
        Load an NXtomo instance from file_path and data_path.

        :param file_path: hdf5 file path containing the NXtomo
        :param data_path: location of the NXtomo
        :param detector_data_as: how to load detector data. Can be:

            * "as_virtual_source": load it as h5py VirtualSource
            * "as_data_url": load it as silx DataUrl
            * "as_numpy_array": load it as a numpy array (warning: can be memory-consuming since all the data will be loaded)
        """
        possible_as_values = ("as_virtual_source", "as_data_url", "as_numpy_array")
        if detector_data_as not in possible_as_values:
            raise ValueError(
                f"detector_data_as is expected to be in {possible_as_values} and not {detector_data_as}"
            )

        if not os.path.exists(file_path):
            raise IOError(f"{file_path} does not exist")
        with hdf5_open(file_path) as h5f:
            if data_path not in h5f:
                raise ValueError(f"{data_path} cannot be found in {file_path}")
            root_node = h5f[data_path]
            if "version" in root_node.attrs:
                nexus_version = root_node.attrs["version"]
            else:
                _logger.warning(
                    f"Unable to find nexus version associated with {data_path}@{file_path}"
                )
                nexus_version = LATEST_NXTOMO_VERSION
        nexus_paths = get_nexus_paths(nexus_version)
        self.energy, self.energy.unit = get_data_and_unit(
            file_path=file_path,
            data_path="/".join([data_path, nexus_paths.ENERGY_PATH]),
            default_unit="kev",
        )
        start_time = get_data(
            file_path=file_path,
            data_path="/".join([data_path, nexus_paths.START_TIME_PATH]),
        )
        try:
            start_time = datetime.fromisoformat(start_time)
        except Exception:
            start_time = str(start_time) if start_time is not None else None
        self.start_time = start_time
        end_time = get_data(
            file_path=file_path,
            data_path="/".join([data_path, nexus_paths.END_TIME_PATH]),
        )
        try:
            end_time = datetime.fromisoformat(end_time)
        except Exception:
            end_time = str(end_time) if end_time is not None else None
        self.end_time = end_time
        self.bliss_original_files = get_data(
            file_path=file_path,
            data_path="/".join([data_path, "bliss_original_files"]),
        )
        self.title = get_data(
            file_path=file_path, data_path="/".join([data_path, nexus_paths.NAME_PATH])
        )
        self.sample._load(
            file_path, "/".join([data_path, "sample"]), nexus_version=nexus_version
        )
        self.instrument._load(
            file_path,
            "/".join([data_path, "instrument"]),
            nexus_version=nexus_version,
            detector_data_as=detector_data_as,
        )
        self.control._load(
            file_path, "/".join([data_path, "control"]), nexus_version=nexus_version
        )
        return self
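    # Hedged usage sketch (file name illustrative): `load` returns `self`, so
    # construction and loading can be chained; `detector_data_as` selects how
    # the frames are exposed ("as_data_url" by default):
    #
    #     nx = NXtomo().load("scan.nx", data_path="entry0000",
    #                        detector_data_as="as_numpy_array")
    #     frames = nx.instrument.detector.data  # a numpy array in this mode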
    @staticmethod
    def check_consistency(nx_tomo, raises_error: bool = False):
        """
        Ensure some key datasets have the expected number of values.

        :param NXtomo nx_tomo: nx_tomo to check
        :param raises_error: if True then raise a ValueError when an incoherent number of values
                             is encountered (a missing dataset only triggers a warning).
                             If False then only log warnings.
        """
        if not isinstance(nx_tomo, NXtomo):
            raise TypeError(
                f"nx_tomo is expected to be an instance of {NXtomo}. {type(nx_tomo)} provided"
            )
        if nx_tomo.sample is not None:
            n_rotation_angle = (
                len(nx_tomo.sample.rotation_angle)
                if nx_tomo.sample.rotation_angle is not None
                else None
            )
            n_x_trans = (
                len(nx_tomo.sample.x_translation.value)
                if nx_tomo.sample.x_translation is not None
                else None
            )
            n_y_trans = (
                len(nx_tomo.sample.y_translation.value)
                if nx_tomo.sample.y_translation is not None
                else None
            )
            n_z_trans = (
                len(nx_tomo.sample.z_translation.value)
                if nx_tomo.sample.z_translation is not None
                else None
            )
        else:
            n_rotation_angle = None
            n_x_trans = None
            n_y_trans = None
            n_z_trans = None
        if nx_tomo.instrument is not None and nx_tomo.instrument.detector is not None:
            frames = (
                nx_tomo.instrument.detector.data
                if nx_tomo.instrument.detector.data is not None
                else None
            )
            n_frames = len(frames) if frames is not None else None
            image_keys = (
                nx_tomo.instrument.detector.image_key_control
                if nx_tomo.instrument.detector.image_key_control is not None
                else None
            )
            n_image_key = len(image_keys) if image_keys is not None else None
            n_count_time = (
                len(nx_tomo.instrument.detector.count_time.value)
                if nx_tomo.instrument.detector.count_time is not None
                else None
            )
        else:
            frames = None
            n_frames = None
            n_image_key = None
            image_keys = None
            n_count_time = None

        n_expected_frames = max(
            (n_rotation_angle or 0),
            (n_frames or 0),
            (n_image_key or 0),
            (n_x_trans or 0),
            (n_y_trans or 0),
            (n_z_trans or 0),
        )

        def check(nb_values, info):
            if nb_values is None:
                _logger.warning(f"{info} not defined")
            elif nb_values != n_expected_frames:
                mess = (
                    f"{info} has {nb_values} values when {n_expected_frames} expected"
                )
                if raises_error:
                    raise ValueError(mess)
                else:
                    _logger.warning(mess)

        check(n_rotation_angle, f"{nx_tomo.node_name}.sample.rotation_angle")
        check(n_x_trans, f"{nx_tomo.node_name}.sample.x_translation")
        check(n_y_trans, f"{nx_tomo.node_name}.sample.y_translation")
        check(n_z_trans, f"{nx_tomo.node_name}.sample.z_translation")
        check(n_frames, f"{nx_tomo.node_name}.instrument.detector.data")
        check(n_image_key, f"{nx_tomo.node_name}.instrument.detector.image_key_control")
        check(n_count_time, f"{nx_tomo.node_name}.instrument.detector.count_time")
        tomo_n = (
            nx_tomo.instrument.detector.tomo_n
            if (
                nx_tomo.instrument is not None
                and nx_tomo.instrument.detector is not None
            )
            else None
        )
        if tomo_n is not None and frames is not None:
            n_projection = len(frames[image_keys == ImageKey.PROJECTION.value])
            if n_projection != tomo_n:
                mess = f"incoherent number of projections found ({n_projection}) compared to tomo_n ({tomo_n})"
                if raises_error:
                    raise ValueError(mess)
                else:
                    _logger.warning(mess)
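    # Hedged usage sketch: with the default raises_error=False an inconsistent
    # NXtomo only produces warnings; set raises_error=True to fail fast:
    #
    #     NXtomo.check_consistency(nx, raises_error=True)  # ValueError on mismatch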
    @staticmethod
    @docstring(NXobject)
    def concatenate(nx_objects: tuple, node_name=""):
        """
        Concatenate a tuple of NXobjects into a single NXobject.

        :param nx_objects:
        :return: NXtomo instance which is the concatenation of the nx_objects
        """
        nx_objects = tuple(filter(partial(is_not, None), nx_objects))  # filter None obj
        if len(nx_objects) == 0:
            return None
        # warning: later we make the assumption that nx_objects contains at least one element
        for nx_obj in nx_objects:
            if not isinstance(nx_obj, NXtomo):
                raise TypeError("Cannot concatenate non NXtomo object")

        nx_tomo = NXtomo(node_name)

        # check object concatenation can be handled
        def get_energy():
            def energy_to_si(energy_with_unit):
                return energy_with_unit.value * energy_with_unit.unit.value

            current_energy_as_si = None
            current_energy = None
            for nx_obj in nx_objects:
                if nx_obj.energy.value is None:
                    continue
                elif current_energy_as_si is None:
                    # get energy and convert it to the expected unit
                    current_energy_as_si = energy_to_si(nx_obj.energy)
                    current_energy = current_energy_as_si / nx_tomo.energy.unit.value
                elif not numpy.isclose(
                    current_energy_as_si, energy_to_si(nx_obj.energy)
                ):
                    _logger.warning(
                        f"{nx_obj} and {nx_objects[0]} have different energy"
                    )
            return current_energy

        nx_tomo.energy = get_energy()
        _logger.info(f"title {nx_objects[0].title} will be picked")
        nx_tomo.title = nx_objects[0].title
        start_times = tuple(
            filter(
                lambda x: x is not None, [nx_obj.start_time for nx_obj in nx_objects]
            )
        )
        end_times = tuple(
            filter(lambda x: x is not None, [nx_obj.end_time for nx_obj in nx_objects])
        )
        nx_tomo.start_time = min(start_times) if len(start_times) > 0 else None
        nx_tomo.end_time = max(end_times) if len(end_times) > 0 else None

        nx_tomo.sample = NXsample.concatenate(
            tuple([nx_obj.sample for nx_obj in nx_objects])
        )
        nx_tomo.sample.parent = nx_tomo

        nx_tomo.instrument = NXinstrument.concatenate(
            tuple([nx_obj.instrument for nx_obj in nx_objects]),
        )
        nx_tomo.instrument.parent = nx_tomo

        nx_tomo.control = NXmonitor.concatenate(
            tuple([nx_obj.control for nx_obj in nx_objects]),
        )
        nx_tomo.control.parent = nx_tomo

        bliss_original_files = set()
        bof_only_none = True
        for nx_obj in nx_objects:
            if nx_obj.bliss_original_files is not None:
                # current behavior of 'bliss_original_files' is that if there is no information (None) then we won't
                # save it to the file as this is pure 'esrf' information. Else if it is there (even if empty) we save it
                bof_only_none = False
                bliss_original_files.update(nx_obj.bliss_original_files)
        bliss_original_files = tuple(
            sorted(bliss_original_files)
        )  # it is more convenient to have it sorted - else it would be sorted by object id

        nx_tomo.bliss_original_files = None if bof_only_none else bliss_original_files
        return nx_tomo
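    # Hedged usage sketch (nx_a / nx_b are illustrative NXtomo instances):
    # concatenation appends frames, image keys and angles in the given order
    # and keeps the title of the first object:
    #
    #     merged = NXtomo.concatenate((nx_a, nx_b))
    #     merged.save("merged.nx", data_path="entry0000")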
    def check_can_select_from_rotation_angle(self):
        if (
            self.sample is None
            or self.sample.rotation_angle is None
            or len(self.sample.rotation_angle) == 0
        ):
            raise ValueError(
                "No information on rotation angle found. Unable to do a selection based on angles"
            )
        if self.instrument is None or self.instrument.detector is None:
            raise ValueError(
                "No detector found. Unable to do a selection based on angles"
            )

    @docstring(NXobject)
    def save(
        self,
        file_path: str,
        data_path: str,
        nexus_path_version: float | None = None,
        overwrite: bool = False,
    ) -> None:
        # Note: we overwrite the save function for NXtomo in order to force 'data_path' to be provided.
        # Else we would get both name and data_path and increase the complexity of determining
        # the final location
        super().save(
            file_path=file_path,
            data_path=data_path,
            nexus_path_version=nexus_path_version,
            overwrite=overwrite,
        )

    @staticmethod
    def sub_select_selection_from_angle_range(
        nx_tomo, start_angle: float, stop_angle: float, copy=True
    ):
        """
        Create an NXtomo like the `nx_tomo` input but update `image_key_control` to INVALID for all
        projections which do not fulfill the condition: start_angle < rotation_angle < stop_angle.

        Note: darks and flats will not be affected by this sub-selection.

        :param start_angle: left bound to apply the selection
        :param stop_angle: right bound to apply the selection
        :param copy: if True then copy the nx_tomo. Else `nx_tomo` will be affected by the modifications
        """
        nx_tomo.check_can_select_from_rotation_angle()
        if copy:
            res = deepcopy(nx_tomo)
        else:
            res = nx_tomo
        mask = numpy.logical_and(
            res.instrument.detector.image_key_control == ImageKey.PROJECTION,
            numpy.logical_or(
                res.sample.rotation_angle <= start_angle,
                res.sample.rotation_angle >= stop_angle,
            ),
        )
        res.instrument.detector.image_key_control[mask] = ImageKey.INVALID
        return res
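    # Hedged usage sketch: keep only the projections acquired strictly between
    # 0 and 180 degrees; darks and flats are left untouched and the discarded
    # projections are simply flagged ImageKey.INVALID:
    #
    #     nx_sub = NXtomo.sub_select_selection_from_angle_range(
    #         nx, start_angle=0.0, stop_angle=180.0, copy=True
    #     )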
    @staticmethod
    def sub_select_from_angle_offset(
        nx_tomo,
        start_angle_offset: float,
        angle_interval: float | None,
        shift_angles: bool,
        copy=True,
    ):
        """
        Get a sub-selection of the NXtomo projections that starts at `start_angle_offset`
        and covers `angle_interval`.

        Note: darks and flats will not be affected by this sub-selection.

        :param start_angle_offset: offset to apply to start the selection. Expected in degree. Must be signed.
                                   **The offset is always relative to the first projection angle value**
        :param angle_interval: interval covered by the selection. If None then will select until the end...
        :param shift_angles: should we shift the angles by `-start_angle_offset` (once the selection is done)
        :param copy: if True then copy the nx_tomo. Else `nx_tomo` will be affected by the modifications
        """
        nx_tomo.check_can_select_from_rotation_angle()
        if copy:
            res = deepcopy(nx_tomo)
        else:
            res = nx_tomo
        if shift_angles:
            # for the shift we shift all the projection angles. Simpler
            mask_shift = (
                res.instrument.detector.image_key_control == ImageKey.PROJECTION
            )
        if angle_interval is None:
            tmp_proj = numpy.asarray(res.sample.rotation_angle)
            angle_interval = abs(tmp_proj.max() - tmp_proj.min()) + abs(
                start_angle_offset
            )
        else:
            angle_interval = abs(angle_interval)

        # determine start and stop angle
        projection_angles = res.sample.rotation_angle[
            res.instrument.detector.image_key_control == ImageKey.PROJECTION
        ]
        if len(projection_angles) < 2 or projection_angles[1] > projection_angles[0]:
            # rotate with positive angles
            start_angle = projection_angles[0] + start_angle_offset
            stop_angle = projection_angles[0] + start_angle_offset + angle_interval
        else:
            # rotate with negative angles
            start_angle = projection_angles[0] + start_angle_offset
            stop_angle = projection_angles[0] + start_angle_offset - angle_interval

        NXtomo.sub_select_selection_from_angle_range(
            res, start_angle=start_angle, stop_angle=stop_angle, copy=False
        )
        # apply rotation angle shift if needed
        if shift_angles:
            res.sample.rotation_angle[mask_shift] -= start_angle_offset
        return res

    @staticmethod
    def clamp_angles(nx_tomo, angle_range, offset=0, copy=True, image_keys=None):
        if copy:
            res = deepcopy(nx_tomo)
        else:
            res = nx_tomo
        if image_keys is None:
            image_keys = ImageKey.values()
        mask_shift = numpy.logical_or(
            *(
                [
                    res.instrument.detector.image_key_control
                    == ImageKey.from_value(image_key)
                    for image_key in image_keys
                ]
            )
        )
        res.sample.rotation_angle[mask_shift] -= offset
        res.sample.rotation_angle[mask_shift] = (
            res.sample.rotation_angle[mask_shift] % angle_range
        )
        return res

    @staticmethod
    def get_valid_entries(file_path: str) -> tuple:
        """
        Return the list of 'NXtomo' entries at the root level.

        :param file_path:
        :return: list of valid NXtomo nodes (ordered alphabetically)

        .. note:: entries are sorted to ensure consistency
        """
        if not os.path.isfile(file_path):
            raise ValueError("given file path should be a file")

        def browse_group(group):
            res_buf = []
            for entry_alias in group.keys():
                entry = group.get(entry_alias)
                if isinstance(entry, h5py.Group):
                    if NXtomo.node_is_nxtomo(entry):
                        res_buf.append(entry.name)
                    else:
                        res_buf.extend(browse_group(entry))
            return res_buf

        with hdf5_open(file_path) as h5f:
            res = browse_group(h5f)
        res.sort()
        return tuple(res)
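    # Hedged usage sketch (file name illustrative): returns an alphabetically
    # sorted tuple of NXtomo entry paths, e.g. ("/entry0000", "/entry0001"):
    #
    #     entries = NXtomo.get_valid_entries("scan.nx")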
    @staticmethod
    def node_is_nxtomo(node: h5py.Group) -> bool:
        """check if the given h5py node is an NXtomo node or not"""
        if "NX_class" in node.attrs or "NXclass" in node.attrs:
            _logger.info(node.name + " is recognized as an nx class.")
        else:
            _logger.info(node.name + " is not an nx class.")
            return False
        if "definition" in node.attrs and node.attrs["definition"].lower() == "nxtomo":
            _logger.info(node.name + " is recognized as an NXtomo class.")
            return True
        elif (
            "instrument" in node
            and "NX_class" in node["instrument"].attrs
            and node["instrument"].attrs["NX_class"]
            in (
                "NXinstrument",
                b"NXinstrument",
            )  # b"NXinstrument" is needed for Diamond compatibility
        ):
            return "detector" in node["instrument"]
        else:
            return False


def copy_nxtomo_file(
    input_file: str,
    output_file: str,
    entries: tuple | None,
    overwrite: bool = False,
    vds_resolution="update",
):
    """
    Copy one or several NXtomo from a file to another file (solving relative links).

    :param input_file: nexus file for which NXtomo entries have to be copied
    :param output_file: output file
    :param entries: entries to be copied. If set to None then all entries will be copied
    :param overwrite: overwrite data path if already exists
    :param vds_resolution: how to solve virtual datasets. Options are:

        * update: update virtual source (relative) paths according to the new location of the file
        * remove: replace the virtual data source by copying the resulting dataset directly.
          Warning: in this case the whole dataset will be loaded into memory

        A future option could be:

        * embed: copy all VDS to new datasets in the output file 'as they are' (avoids loading all the data in memory)
    """
    input_file = os.path.abspath(input_file)
    output_file = os.path.abspath(output_file)
    if input_file == output_file:
        raise ValueError("input file and output file are the same")
    if entries is None:
        entries = NXtomo.get_valid_entries(file_path=input_file)
        if len(entries) == 0:
            _logger.warning(f"no valid entries for {input_file}")
    for entry in entries:
        if vds_resolution == "remove":
            detector_data_as = "as_numpy_array"
        elif vds_resolution == "update":
            detector_data_as = "as_data_url"
        else:
            raise ValueError(
                f"Unexpected value for 'vds_resolution': {vds_resolution}. Valid values are 'remove' and 'update'"
            )
        nx_tomo = NXtomo().load(input_file, entry, detector_data_as=detector_data_as)
        nx_tomo.save(output_file, entry, overwrite=overwrite)

# ===== nxtomo-1.3.0.dev9/nxtomo/application/tests/test_nxtomo.py =====

import os
import h5py
from datetime import datetime

import numpy
import pytest
from silx.io.url import DataUrl
from silx.io.utils import h5py_read_dataset

from nxtomo.application.nxtomo import NXtomo, copy_nxtomo_file
from nxtomo.io import HDF5File
from nxtomo.nxobject.nxdetector import FieldOfView, ImageKey
from nxtomo.nxobject.utils import concatenate
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor

try:
    import tifffile
except ImportError:
    has_tiffile = False
else:
    from nxtomo.utils.utils import create_detector_dataset_from_tiff

    has_tiffile = True

nexus_path_versions = (1.4, 1.3, 1.2, 1.1, 1.0, None)


@pytest.mark.parametrize("nexus_path_version", nexus_path_versions)
def test_nx_tomo(nexus_path_version, tmp_path):
    nx_tomo = NXtomo(node_name="")

    # check start time
    with pytest.raises(TypeError):
        nx_tomo.start_time = 12
    nx_tomo.start_time = datetime.now()
    # check end time
    with pytest.raises(TypeError):
        nx_tomo.end_time = 12
    nx_tomo.end_time = datetime(2022, 2, 27)
    # check sample
    with pytest.raises(TypeError):
        nx_tomo.sample = "tata"
    # check detector
    with pytest.raises(TypeError):
        nx_tomo.instrument.detector = "tata"
    # check energy
    with pytest.raises(TypeError):
        nx_tomo.energy = "tata"
    nx_tomo.energy = 12.3
    # check group size
    with pytest.raises(TypeError):
        nx_tomo.group_size = "tata"
    nx_tomo.group_size = 3
    # check title
    with pytest.raises(TypeError):
        nx_tomo.title = 12
    nx_tomo.title = "title"
    # check instrument
    with pytest.raises(TypeError):
        nx_tomo.instrument = "test"
    # check we can't set undefined attributes
    with pytest.raises(AttributeError):
        nx_tomo.test = 12

    # create detector for test
    projections = numpy.random.random(100 * 100 * 8).reshape([8, 100, 100])
    flats_1 = numpy.random.random(100 * 100 * 2).reshape([2, 100, 100])
    darks = numpy.random.random(100 * 100 * 3).reshape([3, 100, 100])
    flats_2 = numpy.random.random(100 * 100 * 2).reshape([2, 100, 100])
    alignment = numpy.random.random(100 * 100 * 1).reshape([1, 100, 100])
    nx_tomo.instrument.detector.data = numpy.concatenate(
        [
            darks,
            flats_1,
            projections,
            flats_2,
            alignment,
        ]
    )
    nx_tomo.instrument.detector.image_key_control = numpy.concatenate(
        [
            [ImageKey.DARK_FIELD] * 3,
            [ImageKey.FLAT_FIELD] * 2,
            [ImageKey.PROJECTION] * 8,
            [ImageKey.FLAT_FIELD] * 2,
            [ImageKey.ALIGNMENT] * 1,
        ]
    )
    nx_tomo.instrument.detector.x_pixel_size = (
        nx_tomo.instrument.detector.y_pixel_size
    ) = 1e-7
    nx_tomo.instrument.detector.distance = 0.2
    nx_tomo.instrument.detector.field_of_view = FieldOfView.HALF
    nx_tomo.instrument.detector.count_time = numpy.concatenate(
        [
            [0.2] * 3,  # darks
            [0.1] * 2,  # flats 1
            [0.1] * 8,  # projections
            [0.1] * 2,  # flats 2
            [0.1] * 1,  # alignment
        ]
    )

    # create sample for test
    nx_tomo.sample.name = "my sample"
    nx_tomo.sample.rotation_angle = numpy.concatenate(
        [
            [0.0] * 3,  # darks
            [0.0] * 2,  # flats 1
            numpy.linspace(0, 180, num=8, endpoint=False),  # projections
            [180.0] * 2,  # flats 2
            [0.0],  # alignment
        ]
    )
    if
nexus_path_version is None or nexus_path_version >= 1.4: # create source and detector for test nx_tomo.instrument.source.distance = 3.6 nx_tomo.instrument.detector.y_rotation_axis_pixel_position = 1.1 nx_tomo.instrument.detector.x_rotation_axis_pixel_position = 1.2 n_frames = 3 + 2 + 8 + 2 + 1 nx_tomo.sample.x_translation = [0.6] * n_frames nx_tomo.sample.y_translation = [0.2] * n_frames nx_tomo.sample.z_translation = [0.1] * n_frames assert nx_tomo.is_root is True assert nx_tomo.instrument.is_root is False assert ( nx_tomo.root_path == nx_tomo.instrument.root_path == nx_tomo.instrument.detector.root_path ) NXtomo.check_consistency(nx_tomo=nx_tomo, raises_error=True) folder = tmp_path / "test_folder" folder.mkdir() file_path = os.path.join(folder, "nexus_file.hdf5") nx_tomo.save( file_path=file_path, data_path="entry", nexus_path_version=nexus_path_version, ) assert os.path.exists(file_path) # insure we can read it back scan = NXtomo().load(file_path, data_path="entry") assert ( len( tuple( filter( lambda image_key: image_key is ImageKey.FLAT_FIELD, scan.instrument.detector.image_key_control, ) ) ) == 4 ) assert ( len( tuple( filter( lambda image_key: image_key is ImageKey.DARK_FIELD, scan.instrument.detector.image_key_control, ) ) ) == 3 ) assert ( len( tuple( filter( lambda image_key: image_key is ImageKey.PROJECTION, scan.instrument.detector.image_key_control, ) ) ) == 8 ) assert ( len( tuple( filter( lambda image_key: image_key is ImageKey.ALIGNMENT, scan.instrument.detector.image_key_control, ) ) ) == 1 ) assert scan.energy.value == 12.3 assert scan.instrument.detector.x_pixel_size.value == 1e-7 assert scan.instrument.detector.y_pixel_size.value == 1e-7 assert scan.instrument.detector.distance.value == 0.2 assert scan.instrument.detector.field_of_view == FieldOfView.HALF assert scan.sample.name == "my sample" assert ( len(scan.sample.x_translation.value) == len(scan.sample.y_translation.value) == len(scan.sample.z_translation.value) == n_frames ) assert scan.sample.x_translation.value[0] == 0.6 assert scan.sample.y_translation.value[0] == 0.2 assert scan.sample.z_translation.value[0] == 0.1 if nexus_path_version != 1.0: assert scan.instrument.source.name is not None assert scan.instrument.source.type is not None if nexus_path_version is None or nexus_path_version >= 1.4: assert nx_tomo.instrument.source.distance.si_value == 3.6 assert nx_tomo.instrument.detector.y_rotation_axis_pixel_position == 1.1 assert nx_tomo.instrument.detector.x_rotation_axis_pixel_position == 1.2 # try to load it from the disk loaded_nx_tomo = NXtomo("test").load(file_path=file_path, data_path="entry") assert isinstance(loaded_nx_tomo, NXtomo) assert loaded_nx_tomo.energy.value == nx_tomo.energy.value assert loaded_nx_tomo.energy.unit == nx_tomo.energy.unit assert loaded_nx_tomo.start_time == nx_tomo.start_time assert loaded_nx_tomo.end_time == nx_tomo.end_time if nexus_path_version is None or nexus_path_version >= 1.4: assert ( loaded_nx_tomo.instrument.source.distance.si_value == nx_tomo.instrument.source.distance.si_value ) assert ( loaded_nx_tomo.instrument.detector.y_rotation_axis_pixel_position == nx_tomo.instrument.detector.y_rotation_axis_pixel_position ) assert ( loaded_nx_tomo.instrument.detector.x_rotation_axis_pixel_position == nx_tomo.instrument.detector.x_rotation_axis_pixel_position ) assert ( loaded_nx_tomo.instrument.detector.x_pixel_size.value == nx_tomo.instrument.detector.x_pixel_size.value ) assert ( loaded_nx_tomo.instrument.detector.x_pixel_size.unit == 
nx_tomo.instrument.detector.x_pixel_size.unit ) assert ( loaded_nx_tomo.instrument.detector.y_pixel_size.value == nx_tomo.instrument.detector.y_pixel_size.value ) assert ( loaded_nx_tomo.instrument.detector.field_of_view == nx_tomo.instrument.detector.field_of_view ) numpy.testing.assert_array_equal( loaded_nx_tomo.instrument.detector.count_time.value, nx_tomo.instrument.detector.count_time.value, ) assert ( loaded_nx_tomo.instrument.detector.count_time.unit == nx_tomo.instrument.detector.count_time.unit ) assert ( loaded_nx_tomo.instrument.detector.distance.value == nx_tomo.instrument.detector.distance.value ) assert ( loaded_nx_tomo.instrument.detector.distance.unit == nx_tomo.instrument.detector.distance.unit ) numpy.testing.assert_array_equal( loaded_nx_tomo.instrument.detector.image_key_control, nx_tomo.instrument.detector.image_key_control, ) numpy.testing.assert_array_equal( loaded_nx_tomo.instrument.detector.image_key, nx_tomo.instrument.detector.image_key, ) assert loaded_nx_tomo.sample.name == nx_tomo.sample.name assert loaded_nx_tomo.sample.rotation_angle is not None numpy.testing.assert_array_almost_equal( loaded_nx_tomo.sample.rotation_angle, nx_tomo.sample.rotation_angle ) numpy.testing.assert_array_almost_equal( loaded_nx_tomo.sample.x_translation.value, nx_tomo.sample.x_translation.value, ) numpy.testing.assert_array_almost_equal( loaded_nx_tomo.sample.y_translation.value, nx_tomo.sample.y_translation.value, ) numpy.testing.assert_array_almost_equal( loaded_nx_tomo.sample.z_translation.value, nx_tomo.sample.z_translation.value, ) loaded_nx_tomo = NXtomo("test").load( file_path=file_path, data_path="entry", detector_data_as="as_numpy_array" ) numpy.testing.assert_array_almost_equal( loaded_nx_tomo.instrument.detector.data, nx_tomo.instrument.detector.data, ) loaded_nx_tomo = NXtomo("test").load( file_path=file_path, data_path="entry", detector_data_as="as_data_url" ) assert isinstance(loaded_nx_tomo.instrument.detector.data[0], DataUrl) with pytest.raises(ValueError): # check an error is raise because the dataset is not virtual loaded_nx_tomo = NXtomo("test").load( file_path=file_path, data_path="entry", detector_data_as="as_virtual_source", ) # test concatenation nx_tomo_concat = concatenate([loaded_nx_tomo, None, loaded_nx_tomo]) concat_file = os.path.join(folder, "concatenated_nexus_file.hdf5") nx_tomo_concat.save( file_path=concat_file, data_path="myentry", nexus_path_version=nexus_path_version, ) loaded_concatenated_nx_tomo = NXtomo("test").load( file_path=concat_file, data_path="myentry", detector_data_as="as_virtual_source", ) numpy.testing.assert_array_almost_equal( loaded_concatenated_nx_tomo.sample.rotation_angle, numpy.concatenate( [ nx_tomo.sample.rotation_angle, nx_tomo.sample.rotation_angle, ] ), ) numpy.testing.assert_array_almost_equal( loaded_concatenated_nx_tomo.sample.x_translation.value, numpy.concatenate( [ nx_tomo.sample.x_translation.value, nx_tomo.sample.x_translation.value, ] ), ) with pytest.raises(TypeError): concatenate([1, 2]) with h5py.File(concat_file, mode="r") as h5f: h5py_read_dataset(h5f["myentry/definition"]) == "NXtomo" if nexus_path_version is None or nexus_path_version >= 1.4: assert ( loaded_concatenated_nx_tomo.instrument.source.distance.si_value == loaded_nx_tomo.instrument.source.distance.si_value ) assert ( loaded_concatenated_nx_tomo.instrument.detector.x_rotation_axis_pixel_position == loaded_nx_tomo.instrument.detector.x_rotation_axis_pixel_position ) assert ( 
loaded_concatenated_nx_tomo.instrument.detector.y_rotation_axis_pixel_position == loaded_nx_tomo.instrument.detector.y_rotation_axis_pixel_position ) @pytest.mark.parametrize("nexus_path_version", nexus_path_versions) def test_nx_tomo_subselection(nexus_path_version): """ test sub_select_from_projection_angle_range """ nx_tomo = NXtomo() nx_tomo.energy = 12.3 shape = (12, 12) data_dark = numpy.ones(shape) data_flat = numpy.ones(shape) * 2.0 data_projection = numpy.ones(shape) * 3.0 str(nx_tomo) nx_tomo.instrument.detector.data = numpy.concatenate( ( data_dark, data_dark, data_flat, data_projection, data_projection, data_projection, data_flat, data_projection, data_projection, data_projection, data_flat, ) ) nx_tomo.instrument.detector.image_key_control = numpy.array( ( ImageKey.DARK_FIELD, ImageKey.DARK_FIELD, ImageKey.FLAT_FIELD, ImageKey.PROJECTION, ImageKey.PROJECTION, ImageKey.PROJECTION, ImageKey.FLAT_FIELD, ImageKey.PROJECTION, ImageKey.PROJECTION, ImageKey.PROJECTION, ImageKey.FLAT_FIELD, ) ) original_angles = numpy.array( ( 0, 0, 0, 10, 20.5, 22.5, 180, 180, 200, 300.2, 300.2, ) ) nx_tomo.sample.rotation_angle = original_angles nx_tomo_sub_1 = NXtomo.sub_select_from_angle_offset( nx_tomo=nx_tomo, start_angle_offset=10, angle_interval=12.5, shift_angles=False, ) numpy.testing.assert_equal( nx_tomo_sub_1.instrument.detector.image_key_control, numpy.array( ( ImageKey.DARK_FIELD, ImageKey.DARK_FIELD, ImageKey.FLAT_FIELD, ImageKey.INVALID, ImageKey.PROJECTION, ImageKey.PROJECTION, ImageKey.FLAT_FIELD, ImageKey.INVALID, ImageKey.INVALID, ImageKey.INVALID, ImageKey.FLAT_FIELD, ) ), ) numpy.testing.assert_equal( nx_tomo_sub_1.sample.rotation_angle, original_angles, ) nx_tomo_sub_2 = NXtomo.sub_select_from_angle_offset( nx_tomo=nx_tomo, start_angle_offset=10, angle_interval=20, shift_angles=True, ) numpy.testing.assert_equal( nx_tomo_sub_2.sample.rotation_angle[0:3], 0.0, ) numpy.testing.assert_array_equal( nx_tomo_sub_2.sample.rotation_angle[3:6], numpy.array([0.0, 10.5, 12.5]), ) nx_tomo_sub_3 = NXtomo.sub_select_from_angle_offset( nx_tomo=nx_tomo, start_angle_offset=-10, angle_interval=300, shift_angles=False, ) numpy.testing.assert_equal( nx_tomo_sub_3.instrument.detector.image_key_control, numpy.array( ( ImageKey.DARK_FIELD, ImageKey.DARK_FIELD, ImageKey.FLAT_FIELD, ImageKey.PROJECTION, ImageKey.PROJECTION, ImageKey.PROJECTION, ImageKey.FLAT_FIELD, ImageKey.PROJECTION, ImageKey.PROJECTION, ImageKey.INVALID, ImageKey.FLAT_FIELD, ) ), ) nx_tomo_sub_4 = NXtomo.sub_select_from_angle_offset( nx_tomo=nx_tomo, start_angle_offset=-10, angle_interval=None, shift_angles=False, ) numpy.testing.assert_equal( nx_tomo_sub_4.instrument.detector.image_key_control, numpy.array( ( ImageKey.DARK_FIELD, ImageKey.DARK_FIELD, ImageKey.FLAT_FIELD, ImageKey.PROJECTION, ImageKey.PROJECTION, ImageKey.PROJECTION, ImageKey.FLAT_FIELD, ImageKey.PROJECTION, ImageKey.PROJECTION, ImageKey.PROJECTION, ImageKey.FLAT_FIELD, ) ), ) def test_bliss_original_files(tmp_path): """ test about NXtomo.bliss_original_files """ test_dir = tmp_path / "test_bliss_original_files" test_dir.mkdir() nx_tomo_1 = NXtomo() with pytest.raises(TypeError): nx_tomo_1.bliss_original_files = 12 nx_tomo_1.bliss_original_files = ("/path/1", "/path/2") nx_tomo_2 = NXtomo() nx_tomo_2.bliss_original_files = ("/path/2", "/path/3") nx_tomo_3 = NXtomo() nx_tomo_4 = NXtomo() nx_tomo_4.bliss_original_files = () nx_tomo_concat = concatenate([nx_tomo_1, nx_tomo_2, nx_tomo_3]) assert nx_tomo_concat.bliss_original_files == ("/path/1", "/path/2", "/path/3") 
output_nx_tomo_concat = os.path.join(test_dir, "nx_concat.nx") nx_tomo_concat.save(output_nx_tomo_concat, "/entry_concat") loaded_nx_tomo = NXtomo().load(output_nx_tomo_concat, "/entry_concat") assert loaded_nx_tomo.bliss_original_files == ("/path/1", "/path/2", "/path/3") output_nx_tomo_file = os.path.join(test_dir, "nx_tomo.nx") nx_tomo_3.save(output_nx_tomo_file, "/entry0000") loaded_nx_tomo = NXtomo().load(output_nx_tomo_file, "/entry0000") assert loaded_nx_tomo.bliss_original_files is None nx_tomo_4.save(output_nx_tomo_file, "/entry0000", overwrite=True) loaded_nx_tomo = NXtomo().load(output_nx_tomo_file, "/entry0000") assert loaded_nx_tomo.bliss_original_files == () @pytest.mark.parametrize("vds_resolution", ("update", "remove")) def test_copy_nxtomo_file(tmp_path, vds_resolution): """test 'copy_nxtomo_file' function""" input_folder = tmp_path / "input" input_folder.mkdir() input_nx_tomo_file = os.path.join(input_folder, "nexus.nx") output_folder = tmp_path / "output" output_folder.mkdir() nx_tomo = NXtomo() nx_tomo.save(input_nx_tomo_file, "/entry0000") output_file = os.path.join(output_folder, "nxtomo.nx") copy_nxtomo_file( input_nx_tomo_file, entries=None, output_file=output_file, vds_resolution=vds_resolution, ) assert os.path.exists(output_file) def test_multiple_readers(tmp_path): """ test that several reader can access the file in parallel with thread pool or process pool """ input_folder = tmp_path / "input" input_folder.mkdir() input_nx_tomo_file = os.path.join(input_folder, "nexus.nx") output_folder = tmp_path / "output" output_folder.mkdir() nx_tomo = NXtomo() detector_data = numpy.linspace(0, 100, 1000).reshape(10, 10, 10) nx_tomo.instrument.detector.data = detector_data nx_tomo.save(input_nx_tomo_file, "/entry0000") from time import sleep def read_data(): with HDF5File(input_nx_tomo_file, mode="r") as h5f: # with h5py.File(input_nx_tomo_file, mode="r") as h5f: sleep(0.2) return h5f["/entry0000/instrument/detector/data"][()] futures = [] with ThreadPoolExecutor(max_workers=1) as executor: for _ in range(10): futures.append(executor.submit(read_data)) for future in futures: numpy.testing.assert_array_equal(future.result(), detector_data) with ProcessPoolExecutor() as executor: results = executor.map(read_data) for result in results: numpy.testing.assert_array_equal(result, detector_data) @pytest.mark.skipif(not has_tiffile, reason="tiffile not installed") @pytest.mark.parametrize("dtype", (numpy.uint16, numpy.float32)) @pytest.mark.parametrize("provide_dtype", (True, False)) @pytest.mark.parametrize("relative_link", (True, False)) def test_nxtomo_from_tiff(tmp_path, dtype, provide_dtype, relative_link): """ test creation of an nxtomo from a set of .tiff files """ tiffile_folder = tmp_path / "tiffs" tiffile_folder.mkdir() tiff_files = [] raw_data = numpy.linspace( start=0, stop=1000, num=1000, dtype=dtype, ).reshape(10, 10, 10) for i in range(10): tiff_file = os.path.join(tiffile_folder, f"my_file{i}.tif") tifffile.imwrite(tiff_file, raw_data[i]) tiff_files.append(tiff_file) output_nxtomo = os.path.join(tmp_path, "my_nxtomo.nx") nxtomo = NXtomo() with h5py.File(output_nxtomo, mode="w") as h5f: external_dataset_group = h5f.require_group("external_datasets") nxtomo.instrument.detector.data = create_detector_dataset_from_tiff( tiff_files=tiff_files, external_dataset_group=external_dataset_group, dtype=dtype if provide_dtype else None, relative_link=relative_link, ) nxtomo.save( file_path=output_nxtomo, data_path="entry0000", ) loaded_nxtomo = NXtomo().load( output_nxtomo, 
"entry0000", detector_data_as="as_numpy_array" ) numpy.testing.assert_array_equal( loaded_nxtomo.instrument.detector.data, raw_data, ) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1727757728.0 nxtomo-1.3.0.dev9/nxtomo/io.py0000644000175000017500000001302414676676640015336 0ustar00paynopayno""" some io utils to handle `nexus `_ and `hdf5 `_ with `h5py `_ """ from __future__ import annotations import logging import os from contextlib import contextmanager import h5py._hl.selections as selection from silx.io.url import DataUrl from h5py import File as HDF5File # noqa F401 from silx.io.utils import open as hdf5_open import h5py _logger = logging.getLogger(__name__) __all__ = [ "get_swmr_mode", "check_virtual_sources_exist", "from_data_url_to_virtual_source", "from_virtual_source_to_data_url", "cwd_context", "to_target_rel_path", ] _DEFAULT_SWMR_MODE = None def get_swmr_mode() -> bool | None: """ Return True if the swmr should be used in the tomoools scope """ swmr_mode = os.environ.get("TOMOTOOLS_SWMR", _DEFAULT_SWMR_MODE) if swmr_mode in (None, "None", "NONE"): return None else: return swmr_mode in ( True, "True", "true", "TRUE", "1", 1, ) def check_virtual_sources_exist(fname, data_path): """ Check that a virtual dataset points to actual data. :param fname: HDF5 file path :param data_path: Path within the HDF5 file :return res: Whether the virtual dataset points to actual data. """ with hdf5_open(fname) as f: if data_path not in f: _logger.error(f"No dataset {data_path} in file {fname}") return False dptr = f[data_path] if not dptr.is_virtual: return True for vsource in dptr.virtual_sources(): vsource_fname = os.path.join( os.path.dirname(dptr.file.filename), vsource.file_name ) if not os.path.isfile(vsource_fname): _logger.error(f"No such file: {vsource_fname}") return False elif not check_virtual_sources_exist(vsource_fname, vsource.dset_name): _logger.error(f"Error with virtual source {vsource_fname}") return False return True def from_data_url_to_virtual_source(url: DataUrl, target_path: str | None) -> tuple: """ convert a DataUrl to a set (as tuple) of h5py.VirtualSource :param url: url to be converted to a virtual source. It must target a 2D detector :return: (h5py.VirtualSource, tuple(shape of the virtual source), numpy.drype: type of the dataset associated with the virtual source) """ if not isinstance(url, DataUrl): raise TypeError( f"url is expected to be an instance of DataUrl and not {type(url)}" ) with hdf5_open(url.file_path()) as o_h5s: original_data_shape = o_h5s[url.data_path()].shape data_type = o_h5s[url.data_path()].dtype if len(original_data_shape) == 2: original_data_shape = ( 1, original_data_shape[0], original_data_shape[1], ) vs_shape = original_data_shape if url.data_slice() is not None: vs_shape = ( url.data_slice().stop - url.data_slice().start, original_data_shape[-2], original_data_shape[-1], ) if target_path is not None and ( target_path == url.file_path() or os.path.abspath(target_path) == url.file_path() ): file_path = "." 
def from_data_url_to_virtual_source(url: DataUrl, target_path: str | None) -> tuple:
    """
    Convert a DataUrl to a set (as tuple) of h5py.VirtualSource.

    :param url: url to be converted to a virtual source. It must target a 2D detector
    :return: (h5py.VirtualSource, tuple(shape of the virtual source), numpy.dtype: type of the dataset associated with the virtual source)
    """
    if not isinstance(url, DataUrl):
        raise TypeError(
            f"url is expected to be an instance of DataUrl and not {type(url)}"
        )

    with hdf5_open(url.file_path()) as o_h5s:
        original_data_shape = o_h5s[url.data_path()].shape
        data_type = o_h5s[url.data_path()].dtype
        if len(original_data_shape) == 2:
            original_data_shape = (
                1,
                original_data_shape[0],
                original_data_shape[1],
            )

        vs_shape = original_data_shape
        if url.data_slice() is not None:
            vs_shape = (
                url.data_slice().stop - url.data_slice().start,
                original_data_shape[-2],
                original_data_shape[-1],
            )

    if target_path is not None and (
        target_path == url.file_path()
        or os.path.abspath(target_path) == url.file_path()
    ):
        file_path = "."
    else:
        file_path = url.file_path()
    vs = h5py.VirtualSource(file_path, url.data_path(), shape=vs_shape, dtype=data_type)

    if url.data_slice() is not None:
        vs.sel = selection.select(original_data_shape, url.data_slice())
    return vs, vs_shape, data_type


def from_virtual_source_to_data_url(vs: h5py.VirtualSource) -> DataUrl:
    """
    Convert a h5py.VirtualSource to a DataUrl.

    :param vs: virtual source to be converted to a DataUrl
    :return: url
    """
    if not isinstance(vs, h5py.VirtualSource):
        raise TypeError(
            f"vs is expected to be an instance of h5py.VirtualSource and not {type(vs)}"
        )
    url = DataUrl(file_path=vs.path, data_path=vs.name, scheme="silx")
    return url


@contextmanager
def cwd_context(new_cwd=None):
    """
    Create a context with 'new_cwd': on entry, change the current working directory
    to 'new_cwd'; on exit, restore the previous working directory.

    :param new_cwd: current working directory to use in the context
    """
    try:
        curdir = os.getcwd()
    except Exception as e:
        _logger.error(e)
        curdir = None
    try:
        if new_cwd is not None and os.path.isfile(new_cwd):
            new_cwd = os.path.dirname(new_cwd)
        if new_cwd not in (None, ""):
            os.chdir(new_cwd)
        yield
    finally:
        if curdir is not None:
            os.chdir(curdir)


def to_target_rel_path(file_path: str, target_path: str) -> str:
    """
    Cast file_path to a relative path according to target_path.
    This is used to deduce the h5py.VirtualSource path.

    :param file_path: file path to be moved to relative
    :param target_path: target used as 'reference' to get the relative path
    :return: relative path of file_path compared to target_path
    """
    if file_path == target_path or os.path.abspath(file_path) == os.path.abspath(
        target_path
    ):
        return "."
    file_path = os.path.abspath(file_path)
    target_path = os.path.abspath(target_path)
    path = os.path.relpath(file_path, os.path.dirname(target_path))
    if not path.startswith("./"):
        path = "./" + path
    return path
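# Hedged illustration of to_target_rel_path (paths are made up): the returned
# path is relative to the *directory* of target_path and always "./"-prefixed:
#
#     to_target_rel_path("/data/scan/raw.h5", "/data/out/master.h5")
#     # -> "./../scan/raw.h5"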
nxtomo.nxobject.nxtransformations import ( NXtransformations, get_lr_flip, get_ud_flip, ) from nxtomo.utils import cast_and_check_array_1D, get_data, get_data_and_unit from nxtomo.utils.transformation import ( DetYFlipTransformation, DetZFlipTransformation, ) from pyunitsystem import TimeSystem, Unit from pyunitsystem.metricsystem import MetricSystem try: from h5py._hl.vds import VDSmap except ImportError: has_VDSmap = False else: has_VDSmap = True import logging import h5py._hl.selections as selection _logger = logging.getLogger(__name__) __all__ = ["FOV", "ImageKey", "FieldOfView", "NXdetector", "NXdetectorWithUnit"] class FOV(_Enum): """ Possible existing field of view. Use cases are described `here `_ """ @classmethod def from_value(cls, value): if isinstance(value, str): value = value.lower().title() return super().from_value(value) FULL = "Full" """we expect to have the full dataset in the field of view""" HALF = "Half" """we expect to have the half of the dataset in the field of view - around 360degree. reconstruction will generate a sinogram of the 'full' dataset""" FieldOfView = FOV class ImageKey(_Enum): """ NXdetector `image_key `_. used to distinguish different frame types. """ ALIGNMENT = -1 """used for alignment frame (also know as alignement frame)""" PROJECTION = 0 """Projection""" FLAT_FIELD = 1 """flat frame""" DARK_FIELD = 2 """dark frame""" INVALID = 3 """invalid frame (to be ignore during analysis)""" class NXdetector(NXobject): def __init__( self, node_name="detector", parent: NXobject | None = None, field_of_view: FOV | None = None, expected_dim: tuple | None = None, ) -> None: """ representation of `nexus nxdetector `_ Detector of the acquisition. :param node_name: name of the detector in the hierarchy :param parent: parent in the nexus hierarchy :param field_of_view: field of view of the detector - if know. :param expected_dim: user can provide expected dimensions as a tuple of int to be checked when data is set """ super().__init__(node_name=node_name, parent=parent) self._set_freeze(False) self._expected_dim = expected_dim self._data = None self.image_key_control = None self._x_pixel_size = ElementWithUnit(default_unit=MetricSystem.METER) # x 'sample' detector size self._y_pixel_size = ElementWithUnit(default_unit=MetricSystem.METER) # y 'sample' detector size self._transformations = NXtransformations(parent=self) self._distance = ElementWithUnit( default_unit=MetricSystem.METER ) # detector / sample distance self.field_of_view = field_of_view self._count_time = ElementWithUnit(default_unit=TimeSystem.SECOND) self.tomo_n = None self.group_size = None self._roi = None self.__master_vds_file = None # used to record the virtual dataset set file origin in order to solve relative links self._x_rotation_axis_pixel_position: float | None = None self._y_rotation_axis_pixel_position: float | None = None # as the class is 'freeze' we need to set 'estimated_cor_from_motor' once to make sure the API still exists. # the logger filtering avoid to have deprecation logs... with ignore_deprecation_warning(): self.estimated_cor_from_motor = None self._set_freeze(True) @property def data(self) -> numpy.ndarray | tuple | None: """ detector data (frames). 
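Frames are expected to be stacked along the first axis, i.e. shaped ``(n_frames, n_y, n_x)``.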
can be None, a numpy array or a list of DataUrl xor h5py Virtual Source """ return self._data @data.setter def data(self, data: numpy.ndarray | tuple | None): if isinstance(data, (tuple, list)) or ( isinstance(data, numpy.ndarray) and data.ndim == 1 and (self._expected_dim is None or len(self._expected_dim) > 1) ): for elmt in data: if has_VDSmap: if not isinstance(elmt, (DataUrl, VirtualSource, VDSmap)): raise TypeError( f"element of 'data' are expected to be a {len(self._expected_dim)}D numpy array, a list of silx DataUrl or a list of h5py virtualSource. Not {type(elmt)}" ) data = tuple(data) elif isinstance(data, numpy.ndarray): if ( self._expected_dim is not None and data is not None and data.ndim not in self._expected_dim ): raise ValueError( f"data is expected to be {len(self._expected_dim)}D not {data.ndim}D" ) elif data is None: pass else: raise TypeError( f"data is expected to be an instance of {numpy.ndarray}, None or a list of silx DataUrl or h5py Virtual Source. Not {type(data)}" ) self._data = data @property def x_pixel_size(self) -> float | None: """ x pixel size as a field with a unit (get a value and a unit - default unit is SI). Know as 'x sample pixel size' in some application """ return self._x_pixel_size @x_pixel_size.setter def x_pixel_size(self, x_pixel_size: float | None) -> None: if not isinstance(x_pixel_size, (type(None), float)): raise TypeError( f"x_pixel_size is expected ot be an instance of {float} or None. Not {type(x_pixel_size)}" ) self._x_pixel_size.value = x_pixel_size @property def y_pixel_size(self) -> float | None: """ y pixel size as a field with a unit (get a value and a unit - default unit is SI). Know as 'y sample pixel size' in some application """ return self._y_pixel_size @y_pixel_size.setter def y_pixel_size(self, y_pixel_size: float | None) -> None: if not isinstance(y_pixel_size, (type(None), float)): raise TypeError( f"y_pixel_size is expected ot be an instance of {float} or None. Not {type(y_pixel_size)}" ) self._y_pixel_size.value = y_pixel_size @property def x_rotation_axis_pixel_position(self) -> float: """ Absolute position of the Center of Rotation in the detector space in X (X being the abscissa). units: pixel """ return self._x_rotation_axis_pixel_position @x_rotation_axis_pixel_position.setter def x_rotation_axis_pixel_position(self, value: float | None) -> None: if not isinstance(value, (float, type(None))): raise TypeError( f"x_rotation_axis_pixel_position is expected ot be an instance of {float} or None. Not {type(value)}" ) self._x_rotation_axis_pixel_position = value @property def y_rotation_axis_pixel_position(self) -> float: """ Absolute position of the Center of Rotation in the detector space in Y. (Y being the ordinate) units: pixel .. warning:: This field is not handled at the moment by tomotools. Only x position is handled. """ return self._y_rotation_axis_pixel_position @y_rotation_axis_pixel_position.setter def y_rotation_axis_pixel_position(self, value: float | None) -> None: if not isinstance(value, (float, type(None))): raise TypeError( f"y_rotation_axis_pixel_position is expected to be an instance of {float} or None. Not {type(value)}" ) self._y_rotation_axis_pixel_position = value @property def x_flipped(self): """ deprecated: detector image x flip. 
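Reading it now simply checks whether a ``DetZFlipTransformation`` is registered in ``transformations``.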
Use transformation instead """ deprecated_warning( type_="property", name="x_flipped", replacement="transformations", since_version="0.13", reason="NXtransformation is the nexus way to handle detector transformation", ) return DetZFlipTransformation(flip=True) in self.transformations.transformations @x_flipped.setter def x_flipped(self, flipped: bool | None): deprecated_warning( type_="property", name="x_flipped", replacement="transformations", since_version="0.13", reason="NXtransformation is the nexus way to handle detector transformation", ) self.set_transformation_from_x_flipped(flipped) def set_transformation_from_x_flipped(self, flipped: bool | None): """Util function to set transformation from x_flipped simple bool. Used for backward compatibility and convenience. """ # WARNING: moving from two simple boolean to full NXtransformations make the old API very weak. It should be removed # soon (but we want to keep the API for at least one release). This is expected to fail except if you stick to {x,y} flips if isinstance(flipped, numpy.bool_): flipped = bool(flipped) if not isinstance(flipped, (bool, type(None))): raise TypeError( f"x_flipped should be either a (python) boolean or None and is {flipped}, of type {type(flipped)}." ) current_lr_transfs = get_lr_flip(self.transformations) for transf in current_lr_transfs: self.transformations.rm_transformation(transformation=transf) self.transformations.add_transformation(DetZFlipTransformation(flip=flipped)) @property def y_flipped(self): """ deprecated: detector image y flip. Use transformation instead """ deprecated_warning( type_="property", name="y_flipped", replacement="transformations", since_version="0.13", reason="NXtransformation is the nexus way to handle detector transformation", ) return DetYFlipTransformation(flip=True) in self.transformations.transformations @y_flipped.setter def y_flipped(self, flipped: bool): deprecated_warning( type_="property", name="y_flipped", replacement="transformations", since_version="0.13", reason="NXtransformation is the nexus way to handle detector transformation", ) self.set_transformation_from_y_flipped(flipped=flipped) def set_transformation_from_y_flipped(self, flipped: bool | None): # WARNING: moving from two simple boolean to full NXtransformations make the old API very weak. It should be removed # soon (but we want to keep the API for at least one release). This is expected to fail except if you stick to {x,y} flips if isinstance(flipped, numpy.bool_): flipped = bool(flipped) if not isinstance(flipped, (bool, type(None))): raise TypeError( f"y_flipped should be either a (python) boolean or None and is {flipped}, of type {type(flipped)}." ) current_ud_transfs = get_ud_flip(self.transformations) for transf in current_ud_transfs: self.transformations.rm_transformation(transf) self.transformations.add_transformation(DetYFlipTransformation(flip=flipped)) @property def distance(self) -> float | None: """ sample / detector distance as a field with unit (default SI). """ return self._distance @distance.setter def distance(self, distance: float | None) -> None: if not isinstance(distance, (type(None), float)): raise TypeError( f"distance is expected to be an instance of {float} or None. 
Not {type(distance)}" ) self._distance.value = distance @property def field_of_view(self) -> FieldOfView | None: """ detector :class:`~nxtomo.nxobject.nxdetector.FieldOfView` """ return self._field_of_view @field_of_view.setter def field_of_view(self, field_of_view: FieldOfView | str | None) -> None: if field_of_view is not None: field_of_view = FOV.from_value(field_of_view) self._field_of_view = field_of_view @property def count_time(self) -> numpy.ndarray | None: """ count time for each frame """ return self._count_time @count_time.setter def count_time(self, count_time: Iterable | None): self._count_time.value = cast_and_check_array_1D(count_time, "count_time") @property @deprecated( replacement="x_rotation_axis_pixel_position", reason="exists in nexus standard", since_version="1.3", ) def estimated_cor_from_motor(self) -> float | None: """ hint of center of rotation in pixel read from motor (when possible) """ return self._x_rotation_axis_pixel_position @estimated_cor_from_motor.setter @deprecated( replacement="x_rotation_axis_pixel_position", reason="exists in nexus standard", since_version="1.3", ) def estimated_cor_from_motor(self, estimated_cor_from_motor: float | None): self._x_rotation_axis_pixel_position = estimated_cor_from_motor @property def image_key_control(self) -> numpy.ndarray | None: """ :class:`~nxtomo.nxobject.nxdetector.ImageKey` for each frames """ return self._image_key_control @image_key_control.setter def image_key_control(self, control_image_key: Iterable | None): control_image_key = cast_and_check_array_1D( control_image_key, "control_image_key" ) if control_image_key is None: self._image_key_control = None else: # cast all value to instances of ImageKey self._image_key_control = numpy.asarray( [ImageKey.from_value(key) for key in control_image_key] ) @property def image_key(self) -> numpy.ndarray | None: """ :class:`~nxtomo.nxobject.nxdetector.ImageKey` for each frames. Replace all :class:`~nxtomo.nxobject,nxdetector.ImageKey.ALIGNMENT` by :class:`~nxtomo.nxobject,nxdetector.ImageKey.PROJECTION` to fulfil nexus standard """ if self.image_key_control is None: return None else: control_image_key = self.image_key_control.copy() control_image_key[control_image_key == ImageKey.ALIGNMENT] = ( ImageKey.PROJECTION ) return control_image_key @property def tomo_n(self) -> int | None: """ expected number of :class:`~nxtomo.nxobject,nxdetector.ImageKey.PROJECTION` frames """ return self._tomo_n @tomo_n.setter def tomo_n(self, tomo_n: int | None): self._tomo_n = tomo_n @property def group_size(self) -> int | None: """ number of acquisition for the dataset """ return self._group_size @group_size.setter def group_size(self, group_size: int | None): self._group_size = group_size @property def roi(self) -> tuple | None: """ detector region of interest as x0,y0,x1,y1 """ return self._roi @roi.setter def roi(self, roi: tuple | None) -> None: if roi is None: self._roi = None elif not isinstance(roi, (tuple, list, numpy.ndarray)): raise TypeError("roi is expected to be None or a tuple") elif len(roi) != 4: raise ValueError( f"roi is expected to contains four elements. 
Get {len(roi)}" ) else: self._roi = tuple(roi) @docstring(NXobject) def to_nx_dict( self, nexus_path_version: float | None = None, data_path: str | None = None, ) -> dict: nexus_paths = get_nexus_path(nexus_path_version) nexus_detector_paths = nexus_paths.nx_detector_paths nx_dict = {} # image key control if self.image_key_control is not None: path_img_key = f"{self.path}/{nexus_detector_paths.IMAGE_KEY}" nx_dict[path_img_key] = [img_key.value for img_key in self.image_key] path_img_key_ctrl = f"{self.path}/{nexus_detector_paths.IMAGE_KEY_CONTROL}" nx_dict[path_img_key_ctrl] = [ img_key.value for img_key in self.image_key_control ] # x 'sample' pixel if self.x_pixel_size.value is not None: path_x_pixel_size = f"{self.path}/{nexus_detector_paths.X_PIXEL_SIZE}" nx_dict[path_x_pixel_size] = self.x_pixel_size.value nx_dict["@".join([path_x_pixel_size, "units"])] = str( self.x_pixel_size.unit ) # y 'sample' pixel if self.y_pixel_size.value is not None: path_y_pixel_size = f"{self.path}/{nexus_detector_paths.Y_PIXEL_SIZE}" nx_dict[path_y_pixel_size] = self.y_pixel_size.value nx_dict["@".join([path_y_pixel_size, "units"])] = str( self.y_pixel_size.unit ) # distance if self.distance.value is not None: path_distance = f"{self.path}/{nexus_detector_paths.DISTANCE}" nx_dict[path_distance] = self.distance.value nx_dict["@".join([path_distance, "units"])] = str(self.distance.unit) # FOV if self.field_of_view is not None: path_fov = f"{self.path}/{nexus_detector_paths.FOV}" nx_dict[path_fov] = self.field_of_view.value # count time if self.count_time.value is not None: path_count_time = f"{self.path}/{nexus_detector_paths.EXPOSURE_TIME}" nx_dict[path_count_time] = self.count_time.value nx_dict["@".join([path_count_time, "units"])] = str(self.count_time.unit) # tomo n if self.tomo_n is not None: tomo_n_fov_path = f"{nexus_paths.TOMO_N_SCAN}" nx_dict[tomo_n_fov_path] = self.tomo_n if self.group_size is not None: group_size_path = f"{self.path}/{nexus_paths.GRP_SIZE_ATTR}" nx_dict[group_size_path] = self.group_size # x rotation axis position if self.x_rotation_axis_pixel_position is not None: x_rotation_axis_pixel_position_path = ( nexus_detector_paths.X_ROTATION_AXIS_PIXEL_POSITION or nexus_detector_paths.ESTIMATED_COR_FRM_MOTOR ) if x_rotation_axis_pixel_position_path is not None: x_rot_axis_pos_path = ( f"{self.path}/{x_rotation_axis_pixel_position_path}" ) nx_dict[x_rot_axis_pos_path] = self.x_rotation_axis_pixel_position nx_dict[f"{x_rot_axis_pos_path}@units"] = "pixel" # y rotation axis position if ( self.y_rotation_axis_pixel_position is not None and nexus_detector_paths.Y_ROTATION_AXIS_PIXEL_POSITION is not None ): y_rot_axis_pos_path = ( f"{self.path}/{nexus_detector_paths.Y_ROTATION_AXIS_PIXEL_POSITION}" ) nx_dict[y_rot_axis_pos_path] = self.y_rotation_axis_pixel_position nx_dict[f"{y_rot_axis_pos_path}@units"] = "pixel" if self.roi is not None: path_roi = f"{self.path}/{nexus_detector_paths.ROI}" nx_dict[path_roi] = self.roi nx_dict["@".join([path_roi, "units"])] = "pixel" # export TRANSFORMATIONS nx_dict.update( self.transformations.to_nx_dict( nexus_path_version=nexus_path_version, data_path=data_path, solve_empty_dependency=True, ) ) # export detector data nx_dict.update( self._data_to_nx_dict( nexus_path_version=nexus_path_version, data_path=data_path, ) ) return nx_dict def _data_to_nx_dict( self, nexus_path_version: float | None = None, data_path: str | None = None, ) -> dict: nexus_paths = get_nexus_path(nexus_path_version) nexus_detector_paths = nexus_paths.nx_detector_paths nx_dict = {} 
if self.data is not None: # add data path_data = f"{self.path}/{nexus_detector_paths.DATA}" nx_dict[path_data] = self.data nx_dict["@".join([path_data, "interpretation"])] = "image" nx_dict["__vds_master_file__"] = self.__master_vds_file # add attributes to data nx_dict[f"{self.path}@NX_class"] = "NXdetector" nx_dict[f"{self.path}@signal"] = nexus_detector_paths.DATA nx_dict[f"{self.path}@SILX_style/axis_scale_types"] = [ "linear", "linear", ] return nx_dict def _load( self, file_path: str, data_path: str, nexus_version: float, load_data_as: str ) -> None: possible_as_values = ("as_virtual_source", "as_data_url", "as_numpy_array") if load_data_as not in possible_as_values: raise ValueError( f"load_data_as is expected to be in {possible_as_values} and not {load_data_as}" ) self.__master_vds_file = file_path # record the input file if we need to solve virtual dataset path from it nexus_paths = get_nexus_path(nexus_version) nexus_detector_paths = nexus_paths.nx_detector_paths data_dataset_path = f"{data_path}/{nexus_detector_paths.DATA}" def vs_file_path_to_real_path(file_path, vs_file_path): # get file path as absolute for the NXtomo. Simplify management of the # directories if os.path.isabs(vs_file_path): return vs_file_path else: return os.path.join(os.path.dirname(file_path), vs_file_path) # step 1: load frames with hdf5_open(file_path) as h5f: if data_dataset_path in h5f: dataset = h5f[data_dataset_path] else: _logger.error(f"unable to find {data_dataset_path} from {file_path}") return if load_data_as == "as_numpy_array": self.data = dataset[()] elif load_data_as == "as_data_url": if dataset.is_virtual: urls = [] for vs_info in dataset.virtual_sources(): select_bounds = vs_info.vspace.get_select_bounds() left_bound = select_bounds[0] right_bound = select_bounds[1] # warning: for now step is not managed with virtual # dataset length = right_bound[0] - left_bound[0] + 1 # warning: for now step is not managed with virtual # dataset virtual_source = h5py.VirtualSource( vs_file_path_to_real_path( file_path=file_path, vs_file_path=vs_info.file_name ), vs_info.dset_name, vs_info.vspace.shape, ) # here we could provide dataset but we won't to # insure file path will be relative. 
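# the selection below rebuilds the per-source slice so each DataUrl keeps the same frame range as the original virtual source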
type_code = vs_info.src_space.get_select_type() # check for unlimited selections in case where selection is regular # hyperslab, which is the only allowed case for h5s.UNLIMITED to be # in the selection if ( type_code == h5py_h5s.SEL_HYPERSLABS and vs_info.src_space.is_regular_hyperslab() ): ( source_start, stride, count, block, ) = vs_info.src_space.get_regular_hyperslab() source_end = source_start[0] + length sel = selection.select( dataset.shape, slice(source_start[0], source_end), dataset=dataset, ) virtual_source.sel = sel urls.append(from_virtual_source_to_data_url(virtual_source)) else: urls = [ DataUrl( file_path=file_path, data_path=data_dataset_path, scheme="silx", ) ] self.data = urls elif load_data_as == "as_virtual_source": if dataset.is_virtual: virtual_sources = [] for vs_info in dataset.virtual_sources(): u_vs_info = VDSmap( vspace=vs_info.vspace, file_name=vs_file_path_to_real_path( file_path=file_path, vs_file_path=vs_info.file_name ), dset_name=vs_info.dset_name, src_space=vs_info.src_space, ) _, vs = FrameAppender._recreate_vs( vs_info=u_vs_info, vds_file=file_path ) virtual_sources.append(vs) self.data = virtual_sources else: raise ValueError(f"{data_dataset_path} is not virtual") # step 2: load metadata # load 'sample' pixel size try: self.x_pixel_size, self.x_pixel_size.unit = get_data_and_unit( file_path=file_path, data_path="/".join([data_path, nexus_detector_paths.X_PIXEL_SIZE]), default_unit=MetricSystem.METER, ) except TypeError as e: # in case loaded pixel size doesn't fit the type (case Diamond dataset) _logger.warning(f"Fail to load x pixel size. Error is {e}") try: self.y_pixel_size, self.y_pixel_size.unit = get_data_and_unit( file_path=file_path, data_path="/".join([data_path, nexus_detector_paths.Y_PIXEL_SIZE]), default_unit=MetricSystem.METER, ) except TypeError as e: # in case loaded pixel size doesn't fit the type (case Diamond dataset) _logger.warning(f"Fail to load y pixel size. Error is {e}") x_rotation_axis_pixel_position_path = ( nexus_detector_paths.X_ROTATION_AXIS_PIXEL_POSITION or nexus_detector_paths.ESTIMATED_COR_FRM_MOTOR ) if x_rotation_axis_pixel_position_path is not None: self.x_rotation_axis_pixel_position = get_data( file_path=file_path, data_path=f"{data_path}/{x_rotation_axis_pixel_position_path}", ) if nexus_detector_paths.Y_ROTATION_AXIS_PIXEL_POSITION is not None: self.y_rotation_axis_pixel_position = get_data( file_path=file_path, data_path=f"{data_path}/{nexus_detector_paths.Y_ROTATION_AXIS_PIXEL_POSITION}", ) # TODO Henri: create a function without the warning for the backward compatibility if nexus_detector_paths.X_FLIPPED is not None: self.set_transformation_from_x_flipped( get_data( file_path=file_path, data_path="/".join([data_path, nexus_detector_paths.X_FLIPPED]), ) ) if nexus_detector_paths.Y_FLIPPED is not None: self.set_transformation_from_y_flipped( get_data( file_path=file_path, data_path="/".join([data_path, nexus_detector_paths.Y_FLIPPED]), ) ) if nexus_detector_paths.NX_TRANSFORMATIONS is not None: transformations = self.load_transformations( file_path=file_path, data_path=data_path, nexus_version=nexus_version, ) if transformations is not None: self.transformations = transformations try: self.distance, self.distance.unit = get_data_and_unit( file_path=file_path, data_path="/".join([data_path, nexus_detector_paths.DISTANCE]), default_unit=MetricSystem.METER, ) except TypeError as e: # in case loaded pixel size doesn't fit the type (case Diamond dataset) _logger.warning(f"Fail to load distance. 
Error is {e}") self.field_of_view = get_data( file_path=file_path, data_path="/".join([data_path, nexus_detector_paths.FOV]), ) self.count_time, self.count_time.unit = get_data_and_unit( file_path=file_path, data_path="/".join([data_path, nexus_detector_paths.EXPOSURE_TIME]), default_unit=TimeSystem.SECOND, ) self.tomo_n = get_data( file_path=file_path, data_path="/".join([data_path, nexus_paths.TOMO_N_SCAN]), ) self.group_size = get_data( file_path=file_path, data_path="/".join([data_path, nexus_paths.GRP_SIZE_ATTR]), ) self.image_key_control = get_data( file_path=file_path, data_path="/".join([data_path, nexus_detector_paths.IMAGE_KEY_CONTROL]), ) if self.image_key_control is None: # in the case image_key_control doesn't exists (dimaond dataset use case) self.image_key_control = get_data( file_path=file_path, data_path="/".join([data_path, nexus_detector_paths.IMAGE_KEY]), ) roi = get_data( file_path=file_path, data_path="/".join([data_path, nexus_detector_paths.ROI]), ) if roi is not None: self.roi = roi @staticmethod def load_transformations(file_path: str, data_path: str, nexus_version): """ as transformations is not a fixed position try to load it from the default location ('transformations') else browse all HDF5 groups to retrieve an NXTransformations group """ nexus_paths = get_nexus_path(nexus_version) nexus_detector_paths = nexus_paths.nx_detector_paths with hdf5_open(file_path) as h5f: if data_path not in h5f: return None detector_grp = h5f[data_path] # filter valid groups (fitting NXtransformations definition) valid_data_paths = dict( filter( lambda item: NXtransformations.is_a_valid_group(item[1]), detector_grp.items(), ) ) if len(valid_data_paths) == 0: return None elif len(valid_data_paths) > 1: issue = "more than one NXtransformations group found" if nexus_detector_paths.NX_TRANSFORMATIONS in valid_data_paths: _logger.warning( f"{issue}. Will pick the default path as there ({nexus_detector_paths.NX_TRANSFORMATIONS})" ) return NXtransformations.load_from_file( file_path=file_path, data_path="/".join( [data_path, nexus_detector_paths.NX_TRANSFORMATIONS] ), nexus_version=nexus_version, ) raise ValueError(f"{issue} - ({valid_data_paths}). Unable to handle it") else: return NXtransformations.load_from_file( file_path=file_path, data_path="/".join([data_path, list(valid_data_paths.keys())[0]]), nexus_version=nexus_version, ) @staticmethod def _concatenate_except_data(nx_detector, nx_objects: tuple): image_key_ctrl = [ nx_obj.image_key_control for nx_obj in nx_objects if nx_obj.image_key_control is not None ] if len(image_key_ctrl) > 0: nx_detector.image_key_control = numpy.concatenate(image_key_ctrl) # note: image_key is deduced from image_key_control nx_detector.x_pixel_size = nx_objects[0].x_pixel_size.value nx_detector.roi = nx_objects[0].roi nx_detector.y_pixel_size = nx_objects[0].y_pixel_size.value nx_detector.x_rotation_axis_pixel_position = nx_objects[ 0 ].x_rotation_axis_pixel_position nx_detector.y_rotation_axis_pixel_position = nx_objects[ 0 ].y_rotation_axis_pixel_position nx_detector.distance = nx_objects[0].distance.value nx_detector.field_of_view = nx_objects[0].field_of_view nx_detector.transformations = nx_objects[0].transformations for nx_obj in nx_objects[1:]: if nx_detector.x_pixel_size.value and not numpy.isclose( nx_detector.x_pixel_size.value, nx_obj.x_pixel_size.value ): _logger.warning( f"found different x pixel size value. ({nx_detector.x_pixel_size.value} vs {nx_obj.x_pixel_size.value}). 
Pick the first one" ) if nx_detector.y_pixel_size.value and not numpy.isclose( nx_detector.y_pixel_size.value, nx_obj.y_pixel_size.value ): _logger.warning( f"found different y pixel size value. ({nx_detector.y_pixel_size.value} vs {nx_obj.y_pixel_size.value}). Pick the first one" ) if nx_detector.transformations != nx_obj.transformations: _logger.warning( f"found different NXTransformations. ({nx_detector.transformations.to_nx_dict()} vs {nx_obj.transformations.to_nx_dict()}). Pick the first one" ) if nx_detector.distance.value and not numpy.isclose( nx_detector.distance.value, nx_obj.distance.value ): _logger.warning( f"found different distance value. ({nx_detector.distance.value} vs {nx_obj.distance.value}). Pick the first one" ) if ( nx_detector.field_of_view and nx_detector.field_of_view != nx_obj.field_of_view ): _logger.warning( f"found different field_of_view value. ({nx_detector.field_of_view} vs {nx_obj.field_of_view}). Pick the first one" ) if nx_detector.roi != nx_obj.roi: _logger.warning( f"found different detector roi value. ({nx_detector.roi} vs {nx_obj.roi}). Pick the first one" ) @staticmethod @docstring(NXobject) def concatenate(nx_objects: tuple, node_name="detector"): # filter None obj nx_objects = tuple(filter(partial(is_not, None), nx_objects)) if len(nx_objects) == 0: return None # warning: later we make the assumption that nx_objects contains at least one element for nx_obj in nx_objects: if not isinstance(nx_obj, NXdetector): raise TypeError("Cannot concatenate non NXinstrument object") nx_detector = NXdetector(node_name=node_name) NXdetector._concatenate_except_data( nx_objects=nx_objects, nx_detector=nx_detector ) # now handle data on it's own detector_data = [ nx_obj.data for nx_obj in nx_objects if nx_obj.data is not None ] if len(detector_data) > 0: if isinstance(detector_data[0], numpy.ndarray): # store_as = "as_numpy_array" expected = numpy.ndarray elif isinstance(detector_data[0], Iterable): if isinstance(detector_data[0][0], h5py.VirtualSource): # store_as = "as_virtual_source" expected = h5py.VirtualSource elif isinstance(detector_data[0][0], DataUrl): # store_as = "as_data_url" expected = DataUrl else: raise TypeError( f"detector data is expected to be a numpy array or a h5py.VirtualSource or a numpy array. {type(detector_data[0][0])} is not handled." ) else: raise TypeError( f"detector data is expected to be a numpy array or a h5py.VirtualSource or a numpy array. {type(detector_data[0])} is not handled." ) for data in detector_data: if expected in (DataUrl, h5py.VirtualSource): # for DataUrl and VirtualSource check type of the element cond = isinstance(data[0], expected) else: cond = isinstance(data, expected) if not cond: raise TypeError( f"Incoherent data type cross detector data ({type(data)} when {expected} expected)" ) if expected in (DataUrl, h5py.VirtualSource): new_data = [] [new_data.extend(data) for data in detector_data] else: new_data = numpy.concatenate(detector_data) nx_detector.data = new_data return nx_detector @property def transformations(self): """ `NXtransformation `_ of detector (image flip, manual rotation of the detector...) 
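For example (a sketch): ``detector.transformations.add_transformation(DetZFlipTransformation(flip=True))`` registers a left-right flip.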
""" return self._transformations @transformations.setter def transformations(self, transformations: NXtransformations) -> None: self._transformations = transformations class NXdetectorWithUnit(NXdetector): def __init__( self, default_unit: Unit, node_name="detector", parent=None, field_of_view=None, expected_dim: tuple | None = None, ) -> None: super().__init__(node_name, parent, field_of_view, expected_dim) self._data = ElementWithUnit(default_unit=default_unit) @property @docstring(NXdetector) def data(self) -> numpy.ndarray | tuple: """data can be None, a numpy array or a list of DataUrl xor h5py Virtual Source""" return self._data @data.setter @docstring(NXdetector) def data(self, data: numpy.ndarray | tuple | None): if isinstance(data, numpy.ndarray): if ( self._expected_dim is not None and data is not None and data.ndim not in self._expected_dim ): raise ValueError( f"data is expected to be {self._expected_dim}d not {data.ndim}d" ) elif isinstance(data, (tuple, list)): for elmt in data: if not isinstance(elmt, (DataUrl, VirtualSource)): raise TypeError( f"'data' is expected to be a numpy array or a list/tuple composed of DataUrl or h5py virtualSource. Not {type(elmt)}" ) data = tuple(data) elif data is None: pass else: raise TypeError( f"data is expected to be an instance of {numpy.ndarray}, None or a list of silx DataUrl or h5py Virtual Source. Not {type(data)}" ) self._data.value = data def _data_to_nx_dict( self, nexus_path_version: float | None = None, data_path: str | None = None, ) -> dict: nexus_paths = get_nexus_path(nexus_path_version) nexus_detector_paths = nexus_paths.nx_detector_paths nx_dict = {} if self.data.value is not None: # add data path_data = f"{self.path}/{nexus_detector_paths.DATA}" nx_dict[path_data] = self.data.value nx_dict["@".join([path_data, "interpretation"])] = "image" # add attributes to data nx_dict[f"{self.path}@NX_class"] = "NXdetector" nx_dict[f"{self.path}@signal"] = nexus_detector_paths.DATA nx_dict[f"{self.path}@SILX_style/axis_scale_types"] = [ "linear", "linear", ] return nx_dict @staticmethod @docstring(NXobject) def concatenate( nx_objects: tuple, default_unit, expected_dim, node_name="detector" ): # filter None obj nx_objects = tuple(filter(partial(is_not, None), nx_objects)) if len(nx_objects) == 0: return None # warning: later we make the assumption that nx_objects contains at least one element for nx_obj in nx_objects: if not isinstance(nx_obj, NXdetector): raise TypeError("Cannot concatenate non NXinstrument object") nx_detector = NXdetectorWithUnit( node_name=node_name, default_unit=default_unit, expected_dim=expected_dim ) NXdetector._concatenate_except_data( nx_objects=nx_objects, nx_detector=nx_detector ) # now handle data on it's own detector_data = [ nx_obj.data.value for nx_obj in nx_objects if (nx_obj.data is not None and nx_obj.data.value is not None) ] detector_units = set( [ nx_obj.data.unit for nx_obj in nx_objects if (nx_obj.data is not None and nx_obj.data.value is not None) ] ) if len(detector_units) > 1: # with DataUrl and Virtual Sources we are not able to do conversion raise ValueError("More than one units found. 
Unable to build the detector") if len(detector_data) > 0: if isinstance(detector_data[0], numpy.ndarray): # store_as = "as_numpy_array" expected = numpy.ndarray elif isinstance(detector_data[0], Iterable): if isinstance(detector_data[0][0], h5py.VirtualSource): # store_as = "as_virtual_source" expected = h5py.VirtualSource elif isinstance(detector_data[0][0], DataUrl): # store_as = "as_data_url" expected = DataUrl else: raise TypeError( f"detector data is expected to be a numpy array, a list of h5py.VirtualSource or a list of DataUrl. {type(detector_data[0][0])} is not handled." ) else: raise TypeError( f"detector data is expected to be a numpy array, a list of h5py.VirtualSource or a list of DataUrl. {type(detector_data[0])} is not handled." ) for data in detector_data: if expected in (DataUrl, h5py.VirtualSource): # for DataUrl and VirtualSource check the type of the elements cond = isinstance(data[0], expected) else: cond = isinstance(data, expected) if not cond: raise TypeError( f"Incoherent data types across detector data ({type(data)} when {expected} expected)" ) if expected in (DataUrl, h5py.VirtualSource): new_data = [] [new_data.extend(data) for data in detector_data] else: new_data = numpy.concatenate(detector_data) nx_detector.data.value = new_data nx_detector.data.unit = list(detector_units)[0] return nx_detector ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1727757728.0 nxtomo-1.3.0.dev9/nxtomo/nxobject/nxinstrument.py0000644000175000017500000001724714676676640021324 0ustar00paynopayno""" module for handling a `nxinstrument `_ """ from __future__ import annotations import logging from functools import partial from operator import is_not from silx.utils.proxy import docstring from silx.io.utils import open as open_hdf5 from pyunitsystem.voltagesystem import VoltageSystem from nxtomo.paths.nxtomo import get_paths as get_nexus_paths from nxtomo.nxobject.nxdetector import NXdetector, NXdetectorWithUnit from nxtomo.nxobject.nxsource import DefaultESRFSource, NXsource from nxtomo.nxobject.nxobject import NXobject from nxtomo.utils import get_data _logger = logging.getLogger(__name__) __all__ = [ "NXinstrument", ] class NXinstrument(NXobject): def __init__( self, node_name: str = "instrument", parent: NXobject | None = None ) -> None: """ representation of `nexus NXinstrument `_. Collection of the components of the instrument or beamline. :param node_name: name of the instrument in the hierarchy :param parent: parent in the nexus hierarchy """ super().__init__(node_name=node_name, parent=parent) self._set_freeze(False) self._detector = NXdetector( node_name="detector", parent=self, field_of_view="Full", expected_dim=(2, 3), ) self._diode = NXdetectorWithUnit( node_name="diode", parent=self, expected_dim=(1,), default_unit=VoltageSystem.VOLT, ) self._source = DefaultESRFSource(node_name="source", parent=self) self._name = None self._set_freeze(True) @property def detector(self) -> NXdetector | None: """ :class:`~nxtomo.nxobject.nxdetector.NXdetector` """ return self._detector @detector.setter def detector(self, detector: NXdetector | None): if not isinstance(detector, (NXdetector, type(None))): raise TypeError( f"detector is expected to be None or an instance of NXdetector.
Not {type(detector)}" ) self._detector = detector @property def diode(self) -> NXdetector | None: """ :class:`~nxtomo.nxobject.nxdetector.NXdetector` """ return self._diode @diode.setter def diode(self, diode: NXdetector | None): if not isinstance(diode, (NXdetector, type(None))): raise TypeError( f"diode is expected to be None or an instance of NXdetector. Not {type(diode)}" ) self._diode = diode @property def source(self) -> NXsource | None: """ :class:`~nxtomo.nxobject.nxdetector.NXsource` """ return self._source @source.setter def source(self, source: NXsource | None) -> None: if not isinstance(source, (NXsource, type(None))): raise TypeError( f"source is expected to be None or an instance of NXsource. Not {type(source)}" ) self._source = source @property def name(self) -> str | None: """instrument name like BM00""" return self._name @name.setter def name(self, name: str | None) -> None: if not isinstance(name, (str, type(None))): raise TypeError( f"name is expected to be None or an instance of str. Not {type(name)}" ) self._name = name @docstring(NXobject) def to_nx_dict( self, nexus_path_version: float | None = None, data_path: str | None = None, ) -> dict: nexus_paths = get_nexus_paths(nexus_path_version) nexus_instrument_paths = nexus_paths.nx_instrument_paths nx_dict = {} if self._detector is not None: nx_dict.update( self._detector.to_nx_dict(nexus_path_version=nexus_path_version) ) if self._diode is not None: nx_dict.update( self._diode.to_nx_dict(nexus_path_version=nexus_path_version) ) if self._source is not None: nx_dict.update( self.source.to_nx_dict(nexus_path_version=nexus_path_version) ) if self.name is not None: nx_dict[f"{self.path}/{nexus_instrument_paths.NAME}"] = self.name if nx_dict != {}: nx_dict[f"{self.path}@NX_class"] = "NXinstrument" return nx_dict def _load( self, file_path: str, data_path: str, nexus_version: float, detector_data_as: str, ) -> NXobject: """ Create and load an NXsample from data on disk """ nexus_paths = get_nexus_paths(nexus_version) nexus_instrument_paths = nexus_paths.nx_instrument_paths with open_hdf5(file_path) as h5f: if data_path in h5f: has_detector = "detector" in h5f[data_path] has_diode = "diode" in h5f[data_path] has_source = "source" in h5f[data_path] else: has_detector = False has_diode = False has_source = False # TODO: loading detector might be done using the NXclass instead of some hard coded names if has_detector: self.detector._load( file_path=file_path, data_path="/".join( [data_path, "detector"], ), nexus_version=nexus_version, load_data_as=detector_data_as, ) if has_diode: self.diode._load( file_path=file_path, data_path="/".join( [data_path, "diode"], ), nexus_version=nexus_version, load_data_as="as_numpy_array", ) if has_source: self.source._load( file_path=file_path, data_path="/".join([data_path, "source"]), nexus_version=nexus_version, ) if nexus_instrument_paths.NAME is not None: self.name = get_data( file_path=file_path, data_path="/".join([data_path, nexus_instrument_paths.NAME]), ) @staticmethod @docstring(NXobject) def concatenate(nx_objects: tuple, node_name="instrument"): # filter None obj nx_objects = tuple(filter(partial(is_not, None), nx_objects)) if len(nx_objects) == 0: return None # warning: later we make the assumption that nx_objects contains at least one element for nx_obj in nx_objects: if not isinstance(nx_obj, NXinstrument): raise TypeError("Cannot concatenate non NXinstrument object") nx_instrument = NXinstrument(node_name=node_name) nx_instrument.name = nx_objects[0].name 
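# scalar metadata (here the name) is taken from the first object; source, diode and detector are concatenated below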
_logger.info(f"instrument name {nx_objects[0].name} will be picked") nx_instrument.source = NXsource.concatenate( [nx_obj.source for nx_obj in nx_objects], node_name="source", ) nx_instrument.source.parent = nx_instrument nx_instrument.diode = NXdetectorWithUnit.concatenate( [nx_obj.diode for nx_obj in nx_objects], node_name="diode", expected_dim=(1,), default_unit=VoltageSystem.VOLT, ) nx_instrument.diode.parent = nx_instrument nx_instrument.detector = NXdetector.concatenate( [nx_obj.detector for nx_obj in nx_objects], node_name="detector", ) nx_instrument.detector.parent = nx_instrument return nx_instrument ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1727757728.0 nxtomo-1.3.0.dev9/nxtomo/nxobject/nxmonitor.py0000644000175000017500000000736014676676640020606 0ustar00paynopayno""" module for handling a `nxmonitor `_ """ from __future__ import annotations from functools import partial from operator import is_not import numpy from silx.utils.proxy import docstring from nxtomo.paths.nxtomo import get_paths as get_nexus_paths from nxtomo.nxobject.nxobject import ElementWithUnit, NXobject from nxtomo.utils import get_data_and_unit from pyunitsystem import ElectricCurrentSystem __all__ = [ "NXmonitor", ] class NXmonitor(NXobject): def __init__(self, node_name="control", parent: NXobject | None = None) -> None: """ representation of `nexus NXmonitor `_. A monitor of incident beam data. :param node_name: name of the detector in the hierarchy :param parent: parent in the nexus hierarchy """ super().__init__(node_name=node_name, parent=parent) self._set_freeze(False) self._data = ElementWithUnit(default_unit=ElectricCurrentSystem.AMPERE) self._set_freeze(True) @property def data(self) -> numpy.ndarray | None: """ monitor data. In the case of NXtomo it expects to contains machine electric current for each frame """ return self._data @data.setter def data(self, data: numpy.ndarray | list | tuple | None): if isinstance(data, (tuple, list)): if len(data) == 0: data = None else: data = numpy.asarray(data) if isinstance(data, numpy.ndarray): if not data.ndim == 1: raise ValueError(f"data is expected to be 1D and not {data.ndim}d") elif not isinstance(data, type(None)): raise TypeError( f"data is expected to be None or a numpy array. 
Not {type(data)}" ) self._data.value = data @docstring(NXobject) def to_nx_dict( self, nexus_path_version: float | None = None, data_path: str | None = None, ) -> dict: nexus_paths = get_nexus_paths(nexus_path_version) monitor_nexus_paths = nexus_paths.nx_monitor_paths nx_dict = {} if self.data.value is not None: if monitor_nexus_paths.DATA_PATH is not None: data_path = f"{self.path}/{monitor_nexus_paths.DATA_PATH}" nx_dict[data_path] = self.data.value nx_dict["@".join([data_path, "units"])] = str(self.data.unit) if nx_dict != {}: nx_dict[f"{self.path}@NX_class"] = "NXmonitor" return nx_dict def _load(self, file_path: str, data_path: str, nexus_version: float) -> NXobject: """ Create and load an NXmonitor from data on disk """ nexus_paths = get_nexus_paths(nexus_version) monitor_nexus_paths = nexus_paths.nx_monitor_paths if monitor_nexus_paths.DATA_PATH is not None: self.data, self.data.unit = get_data_and_unit( file_path=file_path, data_path="/".join([data_path, monitor_nexus_paths.DATA_PATH]), default_unit="Ampere", ) @staticmethod @docstring(NXobject) def concatenate(nx_objects: tuple, node_name: str = "control"): # filter None obj nx_objects = tuple(filter(partial(is_not, None), nx_objects)) if len(nx_objects) == 0: return None nx_monitor = NXmonitor(node_name=node_name) data = [ nx_obj.data.value * nx_obj.data.unit.value for nx_obj in nx_objects if nx_obj.data.value is not None ] if len(data) > 0: nx_monitor.data = numpy.concatenate(data) return nx_monitor ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1727757728.0 nxtomo-1.3.0.dev9/nxtomo/nxobject/nxobject.py0000644000175000017500000003406014676676640020362 0ustar00paynopayno""" module for handling a `nxobject `_ """ from __future__ import annotations import os import logging import h5py from silx.io.dictdump import dicttonx from silx.io.url import DataUrl from nxtomo.io import ( cwd_context, HDF5File, to_target_rel_path, from_data_url_to_virtual_source, ) from nxtomo.paths.nxtomo import LATEST_VERSION as LATEST_NXTOMO_VERSION from pyunitsystem import Unit _logger = logging.getLogger(__name__) class ElementWithUnit: def __init__(self, default_unit: Unit) -> None: """ Util class to let the user define a unit with a value :param default_unit: default unit of the element """ if not isinstance(default_unit, Unit): raise TypeError(f"{default_unit} should be an instance of {Unit}") self._value = None self._unit = default_unit self._unit_type = type(default_unit) @property def unit(self) -> float | None: """ unit as a float to cast it to SI """ return self._unit @unit.setter def unit(self, unit) -> None: try: unit = self._unit_type.from_value(unit) except Exception: pass if not isinstance(unit, self._unit_type): if isinstance(unit, str): raise ValueError(f"Unable to cast {unit} to a {type(self._unit_type)}") else: raise TypeError( f"invalid unit type. 
{type(unit)} provided when {type(self._unit_type)} expected" ) self._unit = unit @property def value(self): """ element value """ return self._value @value.setter def value(self, value): self._value = value @property def si_value(self): """ value converted to international system """ if self._value is None: return None return self._value * self.unit.value def __str__(self): return f"{self.value} {str(self.unit)}" class NXobject: __isfrozen = False # to ease API and avoid setting wrong attributes we 'freeze' the attributes # see https://stackoverflow.com/questions/3603502/prevent-creating-new-attributes-outside-init def __init__(self, node_name: str, parent=None) -> None: """ representation of `nexus NXobject `_. A monitor of incident beam data. :param node_name: name of the detector in the hierarchy :param parent: parent in the nexus hierarchy """ if not isinstance(node_name, str): raise TypeError( f"name is expected to be an instance of str. Not {type(node_name)}" ) if "/" in node_name: # make sure there is no '/' character. This is reserved to define the NXobject hierarchy raise ValueError( "'/' found in 'node_name' parameter. This is a reserved character. Please change the name" ) self.node_name = node_name self.parent = parent self._set_freeze() def _set_freeze(self, freeze=True): self.__isfrozen = freeze @property def parent(self): # -> NXobject | None: """ :class:`~nxtomo.nxobject.nxobject.NXobject` parent in the hierarchy """ return self._parent @parent.setter def parent(self, parent) -> None: if not isinstance(parent, (type(None), NXobject)): raise TypeError( f"parent is expected to be None or an instance of {NXobject}" ) self._parent = parent @property def is_root(self) -> bool: """is this :class:`~nxtomo.nxobject.nxobject.NXobject` is the higher one""" return self.parent is None @property def root_path(self) -> str: """return path of the root :class:`~nxtomo.nxobject.nxobject.NXobject`""" if self.is_root: return self.path else: return self.parent.root_path @property def path(self): """ path of the object in the nexus hierarchy """ if self.parent is not None: path = "/".join([self.parent.path, self.node_name]) else: path = "" # clean some possible issues with "//" path = path.replace("//", "/") return path @property def node_name(self) -> str: """name of the :class:`~nxtomo.nxobject.nxobject.NXobject` - in the nexus hierarchy""" return self._node_name @node_name.setter def node_name(self, node_name: str): if not isinstance(node_name, str): raise TypeError( f"nexus_name should be an instance of str and not {type(node_name)}" ) self._node_name = node_name def save( self, file_path: str, data_path: str | None = None, nexus_path_version: float | None = None, overwrite: bool = False, ) -> None: """ save NXtomo to disk. :param file_path: hdf5 file :param data_path: location to the NXobject. If not provided will be stored under node_name if provided (and valid) :param nexus_path_version: Optional nexus version as float. If the saving must be done **not** using the latest version :param overwrite: if the data_path in file_path is already existing overwrite it. Else raise will raise an error """ if data_path == "/": _logger.warning( "'data_path' set to '/' is now an invalid value. Please set 'data_path' to None if you want to store it under the NXobject name at root level, else provide data_path. Will ignore it." ) data_path = None entry_path = data_path or self.path or self.node_name # entry path is the 'root path'. If not provided use self.path. 
If None (if at the root level) then use the node name for key, value in dict( [("file_path", file_path), ("entry", data_path)] ).items(): if not isinstance(value, (type(None), str)): raise TypeError( f"{key} is expected to be None or an instance of str not {type(value)}" ) if not isinstance(overwrite, bool): raise TypeError if entry_path.lstrip("/").rstrip("/") == "": raise ValueError( f"root NXobject need to have a data_path to be saved. '{entry_path}' is invalid. Interpreted as '{entry_path.lstrip('/').rstrip('/')}'" ) # not fully sure about the dicttoh5 "add" behavior if os.path.exists(file_path): with h5py.File(file_path, mode="a") as h5f: if entry_path != "/" and entry_path in h5f: if overwrite: del h5f[entry_path] else: raise KeyError(f"{entry_path} already exists") if nexus_path_version is None: nexus_path_version = LATEST_NXTOMO_VERSION nx_dict = self.to_nx_dict( nexus_path_version=nexus_path_version, data_path=data_path ) # retrieve virtual sources and DataUrl datasets_to_handle_in_postprocessing = {} for key in self._get_virtual_sources(nx_dict): datasets_to_handle_in_postprocessing[key] = nx_dict.pop(key) for key in self._get_data_urls(nx_dict): datasets_to_handle_in_postprocessing[key] = nx_dict.pop(key) master_vds_file = self._get_vds_master_file_folder(nx_dict) # retrieve attributes attributes = {} dataset_to_postpone = tuple(datasets_to_handle_in_postprocessing.keys()) for key, value in nx_dict.items(): if key.startswith(dataset_to_postpone): attributes[key] = value # clean attributes for key in attributes: del nx_dict[key] dicttonx( nx_dict, h5file=file_path, h5path=data_path, update_mode="replace", mode="a", ) assert os.path.exists(file_path) # in order to solve relative path we need to be on the (source) master file working directory with cwd_context(master_vds_file): # now handle nx_dict containing h5py.virtualSource or DataUrl # this cannot be handled from the nxdetector class because not aware about # the output file. for ( dataset_path, v_sources_or_data_urls, ) in datasets_to_handle_in_postprocessing.items(): data_type = None vs_shape = None n_frames = 0 v_sources_to_handle_in_postprocessing = [] # convert DataUrl to VirtualSource dataset_keys = v_sources_or_data_urls for v_source_or_data_url in dataset_keys: if isinstance(v_source_or_data_url, DataUrl): vs = from_data_url_to_virtual_source( v_source_or_data_url, target_path=master_vds_file )[0] else: assert isinstance( v_source_or_data_url, h5py.VirtualSource ), "v_source_or_data_url is not a DataUrl or a VirtualSource" vs = v_source_or_data_url if data_type is None: data_type = vs.dtype elif vs.dtype != data_type: raise TypeError( f"Virtual sources have incoherent data types (found {data_type} and {vs.dtype})" ) if not len(vs.maxshape) == 3: raise ValueError( f"Virtual sources are expected to be 3D. {len(vs.maxshape)} found" ) if vs_shape is None: vs_shape = vs.maxshape[1:] elif vs_shape != vs.maxshape[1:]: raise ValueError( f"Virtual sources are expected to have same frame dimensions. 
found {vs_shape} and {vs.maxshape[1:]}" ) n_frames += vs.maxshape[0] vs.path = to_target_rel_path(vs.path, file_path) v_sources_to_handle_in_postprocessing.append(vs) if n_frames == 0: # in the case there is no frame to be saved return vs_shape = [ n_frames, ] + list(vs_shape) layout = h5py.VirtualLayout(shape=tuple(vs_shape), dtype=data_type) # fill virtual dataset loc_pointer = 0 for v_source in v_sources_to_handle_in_postprocessing: layout[loc_pointer : (loc_pointer + v_source.maxshape[0])] = ( v_source ) loc_pointer += v_source.maxshape[0] with HDF5File(file_path, mode="a") as h5s: h5s.create_virtual_dataset( "/".join([entry_path, dataset_path]), layout ) # write attributes of dataset defined from a list of DataUrl or VirtualSource assert os.path.exists(file_path) dicttonx( attributes, h5file=file_path, h5path=entry_path, update_mode="add", mode="a", ) def to_nx_dict( self, nexus_path_version: float | None = None, data_path: str | None = None, ) -> dict: """ convert the NXobject to an nx dict. Dictionary that we can dump to hdf5 file :param nexus_path_version: version of the nexus path version to use :param data_path: can be provided to create some link in the file """ raise NotImplementedError("Base class") def __str__(self) -> str: return f"{type(self)}: {self.path}" @staticmethod def _get_virtual_sources(ddict) -> tuple: """Return key / path containing a list or a tuple of h5py.VirtualSource""" def has_virtual_sources(value): if isinstance(value, h5py.VirtualSource): return True elif isinstance(value, (list, tuple)): for v in value: if has_virtual_sources(v): return True return False keys = [] for key, value in ddict.items(): if has_virtual_sources(value): keys.append(key) return tuple(keys) @staticmethod def _get_vds_master_file_folder(nx_dict: dict): path = nx_dict.pop("__vds_master_file__", None) if path is not None: return os.path.dirname(path) else: return None @staticmethod def _get_data_urls(ddict) -> tuple: """Return key / path containing a list or a tuple of silx.io.url.DataUrl""" def has_data_url(value): if isinstance(value, DataUrl): return True elif isinstance(value, (list, tuple)): for v in value: if has_data_url(v): return True return False keys = [] for key, value in ddict.items(): if has_data_url(value): keys.append(key) return tuple(keys) def __setattr__(self, __name, __value): if self.__isfrozen and not hasattr(self, __name): raise AttributeError("can't set attribute", __name) else: super().__setattr__(__name, __value) @staticmethod def concatenate(nx_objects: tuple, node_name: str): """ concatenate a tuple of NXobject into a single NXobject :param Iterable Nx-objects: nx object to concatenate :param node_name: name of the node to create. Parent must be handled manually for now. 
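A sketch of the expected usage (hypothetical objects): ``NXsample.concatenate((sample_1, sample_2), node_name="sample")``.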
""" raise NotImplementedError("Base class") ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1727757728.0 nxtomo-1.3.0.dev9/nxtomo/nxobject/nxsample.py0000644000175000017500000002475014676676640020402 0ustar00paynopayno""" module for handling a `nxsample `_ """ from __future__ import annotations import logging from functools import partial from operator import is_not from typing import Iterable import numpy from silx.utils.proxy import docstring from nxtomo.paths.nxtomo import get_paths as get_nexus_paths from pyunitsystem.metricsystem import MetricSystem from nxtomo.nxobject.nxobject import ElementWithUnit, NXobject from nxtomo.nxobject.nxtransformations import NXtransformations from nxtomo.utils import cast_and_check_array_1D, get_data, get_data_and_unit _logger = logging.getLogger(__name__) __all__ = [ "NXsample", ] class NXsample(NXobject): def __init__(self, node_name="sample", parent: NXobject | None = None) -> None: """ representation of `nexus NXsample `_. A monitor of incident beam data. :param node_name: name of the detector in the hierarchy :param parent: parent in the nexus hierarchy """ super().__init__(node_name=node_name, parent=parent) self._set_freeze(False) self._name = None self._rotation_angle = None self.rocking = None self.n_steps_rocking = None self.n_steps_rotation = None self._x_translation = ElementWithUnit(default_unit=MetricSystem.METER) self._y_translation = ElementWithUnit(default_unit=MetricSystem.METER) self._z_translation = ElementWithUnit(default_unit=MetricSystem.METER) self._transformations = tuple() self._set_freeze(True) @property def name(self) -> str | None: """sample name""" return self._name @name.setter def name(self, name: str | None) -> None: if not isinstance(name, (type(None), str)): raise TypeError(f"name is expected to be None or str not {type(name)}") self._name = name @property def rotation_angle(self) -> numpy.ndarray | None: """sample rotation angle. One per frame""" return self._rotation_angle @rotation_angle.setter def rotation_angle(self, rotation_angle: Iterable | None): self._rotation_angle = cast_and_check_array_1D(rotation_angle, "rotation_angle") @property def x_translation(self) -> numpy.ndarray | None: """sample translation along x. See `modelling at esrf `_ for more information""" return self._x_translation @x_translation.setter def x_translation(self, x_translation: Iterable | None): self._x_translation.value = cast_and_check_array_1D( x_translation, "x_translation" ) @property def y_translation(self) -> numpy.ndarray | None: """sample translation along y. See `modelling at esrf `_ for more information""" return self._y_translation @y_translation.setter def y_translation(self, y_translation: Iterable | None): self._y_translation.value = cast_and_check_array_1D( y_translation, "y_translation" ) @property def z_translation(self) -> numpy.ndarray | None: """sample translation along z. 
See `modelling at esrf `_ for more information""" return self._z_translation @z_translation.setter def z_translation(self, z_translation: Iterable | None): self._z_translation.value = cast_and_check_array_1D( z_translation, "z_translation" ) @property def transformations(self) -> tuple[NXtransformations]: """detector transformations as `NXtransformations `_""" return self._transformations @transformations.setter def transformations(self, transformations: tuple[NXtransformations]): if not isinstance(transformations, tuple): raise TypeError for transformation in transformations: if not isinstance(transformation, NXtransformations): raise TypeError @docstring(NXobject) def to_nx_dict( self, nexus_path_version: float | None = None, data_path: str | None = None, ) -> dict: nexus_paths = get_nexus_paths(nexus_path_version) nexus_sample_paths = nexus_paths.nx_sample_paths nx_dict = {} if self.name is not None: path_name = f"{self.path}/{nexus_sample_paths.NAME}" nx_dict[path_name] = self.name if self.rotation_angle is not None: path_rotation_angle = f"{self.path}/{nexus_sample_paths.ROTATION_ANGLE}" nx_dict[path_rotation_angle] = self.rotation_angle nx_dict["@".join([path_rotation_angle, "units"])] = "degree" if self.rocking is not None: path_rocking = f"{self.path}/{nexus_sample_paths.ROCKING}" nx_dict[path_rocking] = self.rocking if self.n_steps_rocking is not None: path_n_steps_rocking = f"{self.path}/{nexus_sample_paths.N_STEPS_ROCKING}" nx_dict[path_n_steps_rocking] = self.n_steps_rocking if self.n_steps_rotation is not None: path_n_steps_rotation = f"{self.path}/{nexus_sample_paths.N_STEPS_ROTATION}" nx_dict[path_n_steps_rotation] = self.n_steps_rotation if self.x_translation.value is not None: path_x_translation = f"{self.path}/{nexus_sample_paths.X_TRANSLATION}" nx_dict[path_x_translation] = self.x_translation.value nx_dict["@".join([path_x_translation, "units"])] = str( self.x_translation.unit ) if self.y_translation.value is not None: path_y_translation = f"{self.path}/{nexus_sample_paths.Y_TRANSLATION}" nx_dict[path_y_translation] = self.y_translation.value nx_dict["@".join([path_y_translation, "units"])] = str( self.y_translation.unit ) if self.z_translation.value is not None: path_z_translation = f"{self.path}/{nexus_sample_paths.Z_TRANSLATION}" nx_dict[path_z_translation] = self.z_translation.value nx_dict["@".join([path_z_translation, "units"])] = str( self.z_translation.unit ) if nx_dict != {}: nx_dict[f"{self.path}@NX_class"] = "NXsample" return nx_dict def _load(self, file_path: str, data_path: str, nexus_version: float) -> NXobject: """ Create and load an NXsample from data on disk """ nexus_paths = get_nexus_paths(nexus_version) nexus_sample_paths = nexus_paths.nx_sample_paths self.name = get_data( file_path=file_path, data_path="/".join([data_path, nexus_sample_paths.NAME]), ) self.rotation_angle, angle_unit = get_data_and_unit( file_path=file_path, data_path="/".join([data_path, nexus_sample_paths.ROTATION_ANGLE]), default_unit="degree", ) if angle_unit == "degree": pass elif isinstance(angle_unit, str) and angle_unit.lower() in ("rad", "radian"): self.rotation_angle = numpy.rad2deg(self.rotation_angle) elif angle_unit is not None: raise ValueError(f"rotation angle unit not recognized: {angle_unit}") self.x_translation, self.x_translation.unit = get_data_and_unit( file_path=file_path, data_path="/".join([data_path, nexus_sample_paths.X_TRANSLATION]), default_unit=MetricSystem.METER, ) self.y_translation, self.y_translation.unit = get_data_and_unit( file_path=file_path, 
data_path="/".join([data_path, nexus_sample_paths.Y_TRANSLATION]), default_unit=MetricSystem.METER, ) self.z_translation, self.z_translation.unit = get_data_and_unit( file_path=file_path, data_path="/".join([data_path, nexus_sample_paths.Z_TRANSLATION]), default_unit=MetricSystem.METER, ) @staticmethod @docstring(NXobject) def concatenate(nx_objects: tuple, node_name="sample"): nx_objects = tuple(filter(partial(is_not, None), nx_objects)) # filter None obj if len(nx_objects) == 0: return None # warning: later we make the assumption that nx_objects contains at least one element for nx_obj in nx_objects: if not isinstance(nx_obj, NXsample): raise TypeError("Cannot concatenate non NXsample object") nx_sample = NXsample(node_name) _logger.info(f"sample name {nx_objects[0].name} will be picked") nx_sample.name = nx_objects[0].name rotation_angles = [ nx_obj.rotation_angle for nx_obj in nx_objects if nx_obj.rotation_angle is not None ] if len(rotation_angles) > 0: nx_sample.rotation_angle = numpy.concatenate(rotation_angles) x_translations = [ nx_obj.x_translation.value * nx_obj.x_translation.unit.value for nx_obj in nx_objects if ( nx_obj.x_translation is not None and nx_obj.x_translation.value is not None ) ] if len(x_translations) > 0: nx_sample.x_translation = numpy.concatenate(x_translations) y_translations = [ nx_obj.y_translation.value * nx_obj.y_translation.unit.value for nx_obj in nx_objects if ( nx_obj.y_translation is not None and nx_obj.y_translation.value is not None ) ] if len(y_translations) > 0: nx_sample.y_translation = numpy.concatenate(y_translations) z_translations = [ nx_obj.z_translation.value * nx_obj.z_translation.unit.value for nx_obj in nx_objects if ( nx_obj.z_translation is not None and nx_obj.z_translation.value is not None ) ] if len(z_translations) > 0: nx_sample.z_translation = numpy.concatenate(z_translations) rocking_list = list( filter( partial(is_not, None), [nx_obj.rocking for nx_obj in nx_objects], ) ) if len(rocking_list) > 0: nx_sample.rocking = numpy.concatenate(rocking_list) return nx_sample ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1729761091.0 nxtomo-1.3.0.dev9/nxtomo/nxobject/nxsource.py0000644000175000017500000002114414706407503020373 0ustar00paynopayno""" module for handling a `nxsource `_ """ from __future__ import annotations import logging from functools import partial from operator import is_not import numpy from silx.utils.enum import Enum as _Enum from silx.utils.proxy import docstring from pyunitsystem.metricsystem import MetricSystem from nxtomo.paths.nxtomo import get_paths as get_nexus_paths from nxtomo.nxobject.nxobject import NXobject, ElementWithUnit from nxtomo.utils import get_data, get_data_and_unit _logger = logging.getLogger(__name__) __all__ = ["SourceType", "ProbeType", "NXsource", "DefaultESRFSource"] class SourceType(_Enum): """ source types like 'Synchrotron X-ray Source' or 'Free-Electron Laser' """ SPALLATION_NEUTRON = "Spallation Neutron Source" PULSED_REACTOR_NEUTRON_SOURCE = "Pulsed Reactor Neutron Source" REACTOR_NEUTRON_SOURCE = "Reactor Neutron Source" SYNCHROTRON_X_RAY_SOURCE = "Synchrotron X-ray Source" PULSED_MUON_SOURCE = "Pulsed Muon Source" ROTATING_ANODE_X_RAY = "Rotating Anode X-ray" FIXED_TUBE_X_RAY = "Fixed Tube X-ray" UV_LASER = "UV Laser" FREE_ELECTRON_LASER = "Free-Electron Laser" OPTICAL_LASER = "Optical Laser" ION_SOURCE = "Ion Source" UV_PLASMA_SOURCE = "UV Plasma Source" METAL_JET_X_RAY = "Metal Jet X-ray" class ProbeType(_Enum): """ probe type like 'x-ray' or 
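# --- illustrative sketch (not part of the library sources) ---
# Minimal usage of the NXsample API defined above: fill the per-frame
# arrays, dump the group to a dict of nexus paths, and merge two scans.
# The sample name and the 100-frame geometry below are made-up examples.
import numpy
from nxtomo.nxobject.nxsample import NXsample

sample = NXsample(node_name="sample")
sample.name = "my_sample"
sample.rotation_angle = numpy.linspace(0, 180, 100, endpoint=False)  # one angle per frame
sample.x_translation = [0.0] * 100  # stored as an ElementWithUnit, METER by default

# each populated field is mapped to its nexus path, e.g. 'sample/rotation_angle',
# together with the matching '...@units' attribute
nx_dict = sample.to_nx_dict(nexus_path_version=1.3)

# concatenate() merges frame-wise: arrays are concatenated and the name
# of the first object is kept
merged = NXsample.concatenate([sample, sample])
assert len(merged.rotation_angle) == 200
# --- end of sketch ---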
'neutron' """ NEUTRON = "neutron" X_RAY = "x-ray" MUON = "muon" ELECTRON = "electron" ULTRAVIOLET = "ultraviolet" VISIBLE_LIGHT = "visible light" POSITRON = "positron" PROTON = "proton" class NXsource(NXobject): """Information regarding the x-ray storage ring/facility""" def __init__( self, node_name="source", parent=None, source_name=None, source_type=None, probe=None, ): """ representation of `nexus NXsource `_. The neutron or x-ray storage ring/facility. :param node_name: name of the source in the hierarchy :param parent: parent in the nexus hierarchy :param source_name: name of the source :param source_type: source type :param probe: probe """ super().__init__(node_name=node_name, parent=parent) self._set_freeze(False) self._name = source_name self._type = source_type self._probe = probe self._distance = ElementWithUnit(default_unit=MetricSystem.METER) """source / sample distance""" self._set_freeze(True) @property def name(self) -> None | str: """ source name """ return self._name @name.setter def name(self, source_name: str | None): if isinstance(source_name, numpy.ndarray): # handle Diamond Dataset source_name = source_name.tostring() if hasattr(source_name, "decode"): source_name = source_name.decode() if not isinstance(source_name, (str, type(None))): raise TypeError( f"source_name is expected to be None or a str not {type(source_name)}" ) self._name = source_name @property def type(self) -> SourceType | None: """ source type as :class:`~nxtomo.nxobject.nxsource.SourceType` """ return self._type @type.setter def type(self, type_: None | str | SourceType): if type_ is None: self._type = None else: type_ = SourceType.from_value(type_) self._type = type_ @property def probe(self) -> ProbeType | None: """ probe as :class:`~nxtomo.nxobject.nxsource.ProbeType` """ return self._probe @probe.setter def probe(self, probe: None | str | ProbeType): if probe is None: self._probe = None else: self._probe = ProbeType.from_value(probe) @property def distance(self) -> ElementWithUnit | None: return self._distance @distance.setter def distance(self, value: float | None) -> None: if not isinstance(value, (float, type(None))): raise ValueError( f"distance value is expected to be a float. {type(value)} given."
) self._distance.value = value def __str__(self): return f"{super().__str__()}, (source name: {self.name}, source type: {self.type}, source probe: {self.probe})" @docstring(NXobject) def to_nx_dict( self, nexus_path_version: float | None = None, data_path: str | None = None, ) -> dict: nexus_paths = get_nexus_paths(nexus_path_version) nexus_source_paths = nexus_paths.nx_source_paths nx_dict = {} # warning: source is integrated only since 1.1 version of the nexus path if self.name is not None and nexus_paths.SOURCE_NAME is not None: path_name = f"{self.path}/{nexus_source_paths.NAME}" nx_dict[path_name] = self.name if self.type is not None and nexus_paths.SOURCE_TYPE is not None: path_type = f"{self.path}/{nexus_source_paths.TYPE}" nx_dict[path_type] = self.type.value if self.probe is not None and nexus_paths.SOURCE_PROBE is not None: path_probe = f"{self.path}/{nexus_source_paths.PROBE}" nx_dict[path_probe] = self.probe.value if self.distance is not None and nexus_source_paths.DISTANCE is not None: path_source = f"{self.path}/{nexus_source_paths.DISTANCE}" nx_dict[path_source] = self.distance.value nx_dict["@".join([path_source, "units"])] = str(self.distance.unit) # complete the nexus metadata if not empty if nx_dict != {}: nx_dict[f"{self.path}@NX_class"] = "NXsource" return nx_dict def _load(self, file_path: str, data_path: str, nexus_version: float) -> None: nexus_paths = get_nexus_paths(nexus_version) nexus_source_paths = nexus_paths.nx_source_paths self.name = get_data( file_path=file_path, data_path="/".join([data_path, nexus_source_paths.NAME]), ) try: self.type = get_data( file_path=file_path, data_path="/".join([data_path, nexus_source_paths.TYPE]), ) except ValueError as e: _logger.warning(f"Failed to load source type. Error is {e}") try: self.probe = get_data( file_path=file_path, data_path="/".join([data_path, nexus_source_paths.PROBE]), ) except ValueError as e: _logger.warning(f"Failed to load probe. Error is {e}") try: self.distance, self.distance.unit = get_data_and_unit( file_path=file_path, data_path="/".join([data_path, nexus_source_paths.DISTANCE]), default_unit=MetricSystem.METER, ) except TypeError as e: # in case the loaded distance does not fit the expected type (case of Diamond datasets) _logger.warning(f"Failed to load distance.
Error is {e}") @staticmethod @docstring(NXobject) def concatenate(nx_objects: tuple, node_name="source"): # filter None obj nx_objects = tuple(filter(partial(is_not, None), nx_objects)) if len(nx_objects) == 0: return None # warning: later we make the assumption that nx_objects contains at least one element for nx_obj in nx_objects: if not isinstance(nx_obj, NXsource): raise TypeError("Cannot concatenate non NXsource object") nx_source = NXsource(node_name=node_name) nx_source.name = nx_objects[0].name _logger.info(f"Take the first source name {nx_objects[0].name}") nx_source.type = nx_objects[0].type _logger.info(f"Take the first source type {nx_objects[0].type}") nx_source.probe = nx_objects[0].probe _logger.info(f"Take the first source probe {nx_objects[0].probe}") nx_source.distance.value = nx_objects[0].distance.value nx_source.distance.unit = nx_objects[0].distance.unit _logger.info(f"Take the first source distance {nx_objects[0].distance}") return nx_source class DefaultESRFSource(NXsource): """ ESRF source """ def __init__(self, node_name="source", parent=None) -> None: super().__init__( node_name=node_name, parent=parent, source_name="ESRF", source_type=SourceType.SYNCHROTRON_X_RAY_SOURCE, probe=ProbeType.X_RAY, ) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1727757728.0 nxtomo-1.3.0.dev9/nxtomo/nxobject/nxtransformations.py0000644000175000017500000002636414676676640022345 0ustar00paynopayno""" module for handling a `nxtransformations `_ """ from __future__ import annotations import logging import h5py from copy import deepcopy from silx.utils.proxy import docstring from silx.io.dictdump import nxtodict from silx.io.utils import open as hdf5_open from nxtomo.nxobject.nxobject import NXobject from nxtomo.utils.transformation import ( Transformation, GravityTransformation, get_lr_flip as _get_lr_flip, get_ud_flip as _get_ud_flip, ) from nxtomo.paths.nxtomo import get_paths as get_nexus_paths _logger = logging.getLogger(__name__) __all__ = ["NXtransformations", "get_lr_flip", "get_ud_flip"] class NXtransformations(NXobject): def __init__(self, node_name: str = "transformations", parent=None) -> None: """ Collection of axis-based translations and rotations used to describe a geometry. For tomotools the first usage is to let users provide more metadata to tag an acquisition (like 'the detector has been rotated by 90 degrees'...) :param node_name: name of the transformations group in the hierarchy :param parent: parent in the nexus hierarchy """ super().__init__(node_name, parent) self._set_freeze(False) self._transformations = dict() # dict with axis_name as key and Transformation as value. Simplifies handling compared to a tuple / list / set and ensures the axis_name is unique self._set_freeze(True) @property def transformations(self) -> tuple: """ return the registered :class:`Transformation` instances as a tuple """ return tuple(self._transformations.values()) @transformations.setter def transformations(self, transformations: tuple): """ :param transformations: tuple of :class:`Transformation` instances """ # check type if not isinstance(transformations, (tuple, list)): raise TypeError( f"transformations is expected to be a tuple or a list. {type(transformations)} provided instead" ) for transformation in transformations: if not isinstance(transformation, Transformation): raise TypeError( f"elements are expected to be instances of {Transformation}.
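# --- illustrative sketch (not part of the library sources) ---
# How the NXsource / DefaultESRFSource classes above are meant to be used.
# The manual values ('my source', 50.0) are examples only; note that the
# source distance is only written for nexus paths version >= 1.4.
from nxtomo.nxobject.nxsource import DefaultESRFSource, NXsource

esrf_source = DefaultESRFSource()  # pre-filled: name='ESRF', synchrotron type, x-ray probe
assert esrf_source.probe.value == "x-ray"

source = NXsource(source_name="my source")
source.type = "Synchrotron X-ray Source"  # converted through SourceType.from_value
source.distance = 50.0  # ElementWithUnit, METER by default
nx_dict = source.to_nx_dict(nexus_path_version=1.4)
# --- end of sketch ---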
{type(transformation)} provided instead" ) # convert it to a dict for convenience self._transformations = { transformation.axis_name: transformation for transformation in transformations } def addTransformation(self, *args, **kwargs): _logger.warning("addTransformation is deprecated. Please use add_transformation") self.add_transformation(*args, **kwargs) def add_transformation( self, transformation: Transformation, overwrite=False, skip_if_exists=False ): """ add a transformation to the existing ones. :param transformation: transformation to be added :param overwrite: if a transformation with the same axis_name already exists then overwrite it :param skip_if_exists: if a transformation with the same axis_name already exists then keep the existing one :raises: KeyError, if a transformation with the same axis_name is already registered """ if skip_if_exists is overwrite is True: raise ValueError( "both 'skip_if_exists' and 'overwrite' set to True. Undefined behavior" ) if transformation.axis_name in self._transformations: if overwrite: _logger.info( f"A transformation over {transformation.axis_name} is already registered. Will overwrite it" ) elif skip_if_exists: _logger.info( f"A transformation over {transformation.axis_name} is already registered. Skip add" ) return else: raise KeyError( f"A transformation over {transformation.axis_name} is already registered. axis_name must be unique" ) self._transformations[transformation.axis_name] = transformation def rmTransformation(self, *args, **kwargs): _logger.warning("rmTransformation is deprecated. Please use rm_transformation") self.rm_transformation(*args, **kwargs) def rm_transformation(self, transformation: Transformation): """ remove the provided transformation from the list of existing transformations :param Transformation transformation: transformation to be removed """ self._transformations.pop(transformation.axis_name, None) @docstring(NXobject) def to_nx_dict( self, nexus_path_version: float | None = None, data_path: str | None = None, solve_empty_dependency: bool = False, ) -> dict: """ :param solve_empty_dependency: if True all transformations without a dependency will depend on a "gravity" Transformation which represents gravity """ if len(self._transformations) == 0: # if no transformation, avoid creating the group return {} nexus_paths = get_nexus_paths(nexus_path_version) transformations_nexus_paths = nexus_paths.nx_transformations_paths if transformations_nexus_paths is None: _logger.info( f"no TRANSFORMATIONS provided for version {nexus_path_version}" ) return {} transformations = deepcopy(self._transformations) # preprocessing for gravity if solve_empty_dependency: transformations_needing_gravity = dict( filter( lambda pair: pair[1].depends_on in (None, ""), transformations.items(), ) ) if len(transformations_needing_gravity) > 0: gravity = GravityTransformation() gravity_name = gravity.axis_name if gravity_name in transformations.keys(): _logger.warning( f"transformations already contains a transformation named '{gravity.axis_name}'. Unable to extend the transformation chain (cannot append gravity twice)" ) else: transformations[gravity_name] = gravity # update transformations needing gravity for transformation in transformations_needing_gravity.values(): transformation.depends_on = gravity_name # dump Transformation nx_dict = {} for transformation in transformations.values(): if not isinstance(transformation, Transformation): raise TypeError( f"transformations are expected to be instances of {Transformation}.
{type(transformation)} provided instead." ) nx_dict.update( transformation.to_nx_dict( transformations_nexus_paths=transformations_nexus_paths, data_path=self.path, ) ) nx_dict[f"{self.path}@NX_class"] = "NX_transformations" nx_dict[f"{self.path}@units"] = "NX_TRANSFORMATION" return nx_dict @staticmethod def load_from_file(file_path: str, data_path: str, nexus_version: float | None): """ create an instance of :class:`~nxtomo.nxobject.nxtransformations.NXtransformations` and load its values from the given file and data path """ result = NXtransformations() return result._load( file_path=file_path, data_path=data_path, nexus_version=nexus_version ) def _load( self, file_path: str, data_path: str, nexus_version: float | None ) -> NXobject: """ Create and load an NXtransformations from data on disk """ nexus_paths = get_nexus_paths(nexus_version) transformations_nexus_paths = nexus_paths.nx_transformations_paths with hdf5_open(file_path) as h5f: if data_path == "": pass elif data_path not in h5f: _logger.error( f"No NXtransformations found in {file_path} under {data_path} location." ) return transformations_as_nx_dict = nxtodict(file_path, path=data_path) # filter attributes from the dict (by convention attribute keys contain the '@' char) transformations_keys = dict( filter( lambda a: "@" not in a[0], transformations_as_nx_dict.items(), ) ) for key in transformations_keys: transformation = Transformation.from_nx_dict( axis_name=key, dict_=transformations_as_nx_dict, transformations_nexus_paths=transformations_nexus_paths, ) if transformation is None: # if failed to load transformation (old version of nexus ?) continue else: self.add_transformation(transformation=transformation) return self @staticmethod @docstring(NXobject) def concatenate(nx_objects: tuple, node_name="transformations"): res = NXtransformations(node_name=node_name) for nx_transformations in nx_objects: if not isinstance(nx_transformations, NXtransformations): raise TypeError( f"nx_objects are expected to be instances of {NXtransformations}. {type(nx_transformations)} provided instead" ) for transformation in nx_transformations.transformations: res.add_transformation(transformation, skip_if_exists=True) return res def __eq__(self, __value: object) -> bool: if not isinstance(__value, NXtransformations): return False else: # to check equality we filter out gravity as it can be appended at the end as the chain reference def is_not_gravity(transformation): return transformation != GravityTransformation() return list(filter(is_not_gravity, self.transformations)) == list( filter(is_not_gravity, __value.transformations) ) @staticmethod def is_a_valid_group(group: h5py.Group) -> bool: """ check if the group stores an NXtransformations.
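# --- illustrative sketch (not part of the library sources) ---
# Typical round trip with the NXtransformations class above: register a
# Transformation, save it, reload it and compare. The file name and data
# path are made up; the 'rz' rotation mirrors the tests further down.
from nxtomo.nxobject.nxtransformations import NXtransformations
from nxtomo.utils.transformation import Transformation, TransformationAxis

transformations = NXtransformations()
transformations.add_transformation(
    Transformation(
        axis_name="rz",
        value=180,
        transformation_type="rotation",
        vector=TransformationAxis.AXIS_Z,
    )
)
# each Transformation becomes one dataset plus its 'transformation_type',
# 'units', 'vector' and 'offset' attributes
as_dict = transformations.to_nx_dict(data_path="")

transformations.save("my_file.nx", "/entry/transformations")
loaded = NXtransformations.load_from_file(
    "my_file.nx", "/entry/transformations", nexus_version=1.3
)
assert loaded == transformations  # gravity, if appended on save, is ignored by __eq__
# --- end of sketch ---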
For now the only condition is to be a group and to get NXtransformations as attr """ if not isinstance(group, h5py.Group): return False return group.attrs.get("NX_class", None) in ( "NX_transformations", "NX_TRANSFORMATIONS", ) def __len__(self): return len(self.transformations) def get_lr_flip(transformations: tuple | NXtransformations) -> tuple: """ check along all transformations if find Transformation matching 'LRTransformation' return a tuple with all matching keys """ if isinstance(transformations, NXtransformations): transformations = transformations.transformations return _get_lr_flip(transformations) def get_ud_flip(transformations: tuple | NXtransformations) -> tuple: """ check along all transformations if find Transformation matching 'UDTransformation' return a tuple with all matching keys """ if isinstance(transformations, NXtransformations): transformations = transformations.transformations return _get_ud_flip(transformations) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1729922990.4957538 nxtomo-1.3.0.dev9/nxtomo/nxobject/tests/0000755000175000017500000000000014707103656017316 5ustar00paynopayno././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1729761091.0 nxtomo-1.3.0.dev9/nxtomo/nxobject/tests/test_nxdetector.py0000644000175000017500000004517014706407503023112 0ustar00paynopaynoimport os import tempfile import h5py import numpy.random import pytest from silx.io.url import DataUrl from nxtomo.io import cwd_context from pyunitsystem.voltagesystem import VoltageSystem from nxtomo.utils.transformation import TransformationAxis from nxtomo.utils.transformation import ( Transformation, DetYFlipTransformation, DetZFlipTransformation, ) from nxtomo.nxobject.nxdetector import ( FieldOfView, NXdetector, NXdetectorWithUnit, ImageKey, ) def test_nx_detector(): """test creation and saving of an nxdetector""" nx_detector = NXdetector(expected_dim=(2, 3)) # check data with pytest.raises(TypeError): nx_detector.data = 12 # if expected dims is not fulfill with pytest.raises(ValueError): nx_detector.data = numpy.random.random(100 * 100 * 5).reshape(5, 10, 10, 100) with pytest.raises(TypeError): nx_detector.data = ( 12, 13, ) nx_detector.data = numpy.random.random(100 * 100 * 5).reshape(5, 100, 100) # check image key control with pytest.raises(TypeError): nx_detector.image_key_control = 12 nx_detector.image_key_control = [1] * 5 nx_detector.image_key_control = [ImageKey.PROJECTION] * 5 # check x and y pixel size (both 'real' and 'sample') with pytest.raises(TypeError): nx_detector.x_pixel_size = "test" nx_detector.x_pixel_size = 1e-7 with pytest.raises(TypeError): nx_detector.y_pixel_size = {} nx_detector.y_pixel_size = 2e-7 # check x_flipped and y_flipped with pytest.raises(TypeError): nx_detector.x_flipped = 12 with pytest.raises(TypeError): nx_detector.y_flipped = 12 nx_detector.x_flipped = True nx_detector.y_flipped = False # check detector distance with pytest.raises(TypeError): nx_detector.distance = "test" nx_detector.distance = 0.02 # check field of view with pytest.raises(ValueError): nx_detector.field_of_view = "test" nx_detector.field_of_view = FieldOfView.HALF # check count time with pytest.raises(TypeError): nx_detector.count_time = 12 nx_detector.count_time = [0.1] * 5 # check x, y rotation axis positions with pytest.raises(TypeError): nx_detector.x_rotation_axis_pixel_position = "toto" nx_detector.x_rotation_axis_pixel_position = 12.3 with pytest.raises(TypeError): nx_detector.y_rotation_axis_pixel_position = 
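# --- illustrative sketch (not part of the library sources) ---
# Condensed version of the virtual-source pattern exercised by the tests
# around here: frames written to separate HDF5 files are exposed through
# a single virtual dataset by assigning h5py.VirtualSource objects to
# NXdetector.data. File names and shapes are examples.
import h5py
import numpy
from nxtomo.nxobject.nxdetector import NXdetector

v_sources = []
for i in range(3):
    with h5py.File(f"raw_{i}.h5", "w") as h5f:
        h5f["data"] = numpy.zeros((4, 100, 100), dtype="f4")
        v_sources.append(h5py.VirtualSource(h5f["data"]))

detector = NXdetector()
detector.data = v_sources
detector.save(file_path="detector.h5")  # '/detector/data' ends up as a virtual dataset
# --- end of sketch ---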
"toto" nx_detector.y_rotation_axis_pixel_position = 2.3 assert isinstance(nx_detector.to_nx_dict(), dict) # check we can't set undefined attributes with pytest.raises(AttributeError): nx_detector.test = 12 # test nx_detector concatenation concatenated_nx_detector = NXdetector.concatenate([nx_detector, nx_detector]) numpy.testing.assert_array_equal( concatenated_nx_detector.image_key_control, [ImageKey.PROJECTION] * 10 ) assert concatenated_nx_detector.x_pixel_size.value == 1e-7 assert concatenated_nx_detector.y_pixel_size.value == 2e-7 assert concatenated_nx_detector.distance.value == 0.02 assert nx_detector.x_rotation_axis_pixel_position == 12.3 assert nx_detector.y_rotation_axis_pixel_position == 2.3 nx_detector.field_of_view = FieldOfView.HALF nx_detector.count_time = [0.1] * 10 nx_detector.roi = None nx_detector.roi = (0, 0, 2052, 1024) with pytest.raises(TypeError): nx_detector.roi = "toto" with pytest.raises(ValueError): nx_detector.roi = (12,) def test_nx_detector_with_unit(): diode = NXdetectorWithUnit( node_name="diode", expected_dim=(1,), default_unit=VoltageSystem.VOLT, ) with pytest.raises(ValueError): diode.data = numpy.arange(10 * 10).reshape([10, 10]) with pytest.raises(TypeError): diode.data = [10, 12] with pytest.raises(TypeError): diode.data = "test" diode.data = None diode.data = numpy.random.random(12) diode.data = (DataUrl(),) # test nx_detector concatenation concatenated_nx_detector = NXdetectorWithUnit.concatenate( [diode, diode], expected_dim=(1,), default_unit=VoltageSystem.VOLT, ) assert len(concatenated_nx_detector.data.value) == 2 assert isinstance(concatenated_nx_detector.data.value[1], DataUrl) def test_nx_detector_with_virtual_source(): """Insure detector data can be write from Virtual sources""" cwd = os.getcwd() with tempfile.TemporaryDirectory() as tmp_folder: # create virtual dataset n_base_raw_dataset = 5 n_z, n_y, n_x = 4, 100, 100 base_raw_dataset_shape = (n_z, n_y, n_x) n_base_raw_dataset_elmts = n_z * n_y * n_x v_sources = [] raw_files = [ os.path.join(tmp_folder, f"raw_file_{i_file}.hdf5") for i_file in range(n_base_raw_dataset) ] for i_raw_file, raw_file in enumerate(raw_files): with h5py.File(raw_file, mode="w") as h5f: h5f["data"] = numpy.arange( start=n_base_raw_dataset_elmts * i_raw_file, stop=n_base_raw_dataset_elmts * (i_raw_file + 1), ).reshape(base_raw_dataset_shape) v_sources.append(h5py.VirtualSource(h5f["data"])) nx_detector = NXdetector() nx_detector.data = v_sources detector_file = os.path.join(tmp_folder, "detector_file.hdf5") nx_detector.save(file_path=detector_file) # check the virtual dataset has been properly created and linked with h5py.File(detector_file, mode="r") as h5f_master: dataset = h5f_master["/detector/data"] assert dataset.is_virtual for i_raw_file, raw_file in enumerate(raw_files): with h5py.File(raw_file, mode="r") as h5f_raw: numpy.testing.assert_array_equal( dataset[i_raw_file * n_z : (i_raw_file + 1) * n_z], h5f_raw["data"], ) # check attributes have been rewrite as expected assert "interpretation" in dataset.attrs # check virtual dataset is composed of relative links for vs_info in dataset.virtual_sources(): assert vs_info.file_name.startswith(".") assert cwd == os.getcwd() # check concatenation concatenated_nx_detector = NXdetector.concatenate([nx_detector, nx_detector]) assert isinstance(concatenated_nx_detector.data[1], h5py.VirtualSource) assert len(concatenated_nx_detector.data) == len(raw_files) * 2 def test_nx_detector_with_local_urls(): """Insure detector data can be write from DataUrl linking to local 
dataset (in the same file)""" cwd = os.getcwd() n_base_dataset = 3 n_z, n_y, n_x = 2, 10, 20 base_dataset_shape = (n_z, n_y, n_x) n_base_dataset_elmts = n_z * n_y * n_x urls = [] with tempfile.TemporaryDirectory() as tmp_folder: master_file = os.path.join(tmp_folder, "master_file.hdf5") with h5py.File(master_file, mode="a") as h5f: for i in range(n_base_dataset): data_path = f"/data_{i}" h5f[data_path] = numpy.arange( start=n_base_dataset_elmts * i, stop=n_base_dataset_elmts * (i + 1), ).reshape(base_dataset_shape) urls.append( DataUrl( file_path=master_file, data_path=data_path, scheme="silx", ) ) nx_detector = NXdetector() nx_detector.data = urls nx_detector.save(file_path=master_file) # check the virtual dataset has been properly createde and linked with h5py.File(master_file, mode="r") as h5f_master: dataset = h5f_master["/detector/data"] assert dataset.is_virtual for i in range(n_base_dataset): numpy.testing.assert_array_equal( dataset[i * n_z : (i + 1) * n_z], numpy.arange( start=n_base_dataset_elmts * i, stop=n_base_dataset_elmts * (i + 1), ).reshape(base_dataset_shape), ) # check virtual dataset is composed of relative links for vs_info in dataset.virtual_sources(): assert vs_info.file_name.startswith(".") assert cwd == os.getcwd() # check concatenation concatenated_nx_detector = NXdetector.concatenate([nx_detector, nx_detector]) assert isinstance(concatenated_nx_detector.data[1], DataUrl) assert len(concatenated_nx_detector.data) == n_base_dataset * 2 def test_nx_detector_with_external_urls(): """Insure detector data can be write from DataUrl linking to external dataset""" cwd = os.getcwd() with tempfile.TemporaryDirectory() as tmp_folder: # create virtual dataset n_base_raw_dataset = 5 n_z, n_y, n_x = 4, 100, 100 base_raw_dataset_shape = (n_z, n_y, n_x) n_base_raw_dataset_elmts = n_z * n_y * n_x urls = [] raw_files = [ os.path.join(tmp_folder, f"raw_file_{i_file}.hdf5") for i_file in range(n_base_raw_dataset) ] for i_raw_file, raw_file in enumerate(raw_files): with h5py.File(raw_file, mode="w") as h5f: h5f["data"] = numpy.arange( start=n_base_raw_dataset_elmts * i_raw_file, stop=n_base_raw_dataset_elmts * (i_raw_file + 1), ).reshape(base_raw_dataset_shape) # provide one file path each two as an absolue path if i_raw_file % 2 == 0: file_path = os.path.abspath(raw_file) else: file_path = os.path.relpath(raw_file, tmp_folder) urls.append( DataUrl( file_path=file_path, data_path="data", scheme="silx", ) ) nx_detector = NXdetector() nx_detector.data = urls detector_file = os.path.join(tmp_folder, "detector_file.hdf5") # needed as we provide some link with relative path with cwd_context(tmp_folder): nx_detector.save(file_path=detector_file) # check the virtual dataset has been properly createde and linked with h5py.File(detector_file, mode="r") as h5f_master: dataset = h5f_master["/detector/data"] assert dataset.is_virtual for i_raw_file, raw_file in enumerate(raw_files): with h5py.File(raw_file, mode="r") as h5f_raw: numpy.testing.assert_array_equal( dataset[i_raw_file * n_z : (i_raw_file + 1) * n_z], h5f_raw["data"], ) # check virtual dataset is composed of relative links for vs_info in dataset.virtual_sources(): assert vs_info.file_name.startswith(".") assert cwd == os.getcwd() # check concatenation concatenated_nx_detector = NXdetector.concatenate([nx_detector, nx_detector]) assert isinstance(concatenated_nx_detector.data[1], DataUrl) assert len(concatenated_nx_detector.data) == n_base_raw_dataset * 2 @pytest.mark.parametrize( "load_data_as, expected_type", [ ("as_numpy_array", 
numpy.ndarray), ("as_virtual_source", h5py.VirtualSource), ("as_data_url", DataUrl), ], ) def test_load_detector_data(tmp_path, load_data_as, expected_type): layout = h5py.VirtualLayout(shape=(4 * 2, 100, 100), dtype="i4") for n in range(0, 4): filename = os.path.join(tmp_path, "{n}.h5") with h5py.File(filename, "w") as f: f["data"] = numpy.arange(100 * 100 * 2).reshape(2, 100, 100) vsource = h5py.VirtualSource(filename, "data", shape=(2, 100, 100)) start_n = n * 2 end_n = start_n + 2 layout[start_n:end_n] = vsource output_file = os.path.join(tmp_path, "VDS.h5") with h5py.File(output_file, "w") as f: f.create_virtual_dataset("data", layout, fillvalue=-5) nx_detector = NXdetector() nx_detector._load( file_path=output_file, data_path="/", load_data_as=load_data_as, nexus_version=None, ) if expected_type is numpy.ndarray: assert isinstance(nx_detector.data, expected_type) else: for elmt in nx_detector.data: assert isinstance(elmt, expected_type) nx_detector.save(os.path.join(tmp_path, "output_file.nx")) def test_nxtransformations_with_nxdetector(tmp_path): """ test behavior of an Nxtransformations with an NXtomo and coherence between lr_flip / ud_flip API (provided for convenience) and providing directly the transformations """ def build_detector(): nx_detector = NXdetector(expected_dim=(2, 3)) nx_detector.data = numpy.random.random(100 * 100 * 5).reshape(5, 100, 100) nx_detector.image_key_control = [1] * 5 nx_detector.image_key_control = [ImageKey.PROJECTION] * 5 return nx_detector nx_detector_1 = build_detector() nx_detector_2 = build_detector() # test having a left-right flip nx_detector_1.transformations.add_transformation( Transformation( axis_name="rz", value=180, transformation_type="rotation", vector=TransformationAxis.AXIS_Z, ) ) nx_detector_2.x_flipped = True assert ( nx_detector_1.transformations.to_nx_dict() == nx_detector_2.transformations.to_nx_dict() ) # test having a up-down flip nx_detector_3 = build_detector() nx_detector_4 = build_detector() nx_detector_3.transformations.add_transformation( Transformation( axis_name="ry", value=180, transformation_type="rotation", vector=TransformationAxis.AXIS_Y, ) ) nx_detector_4.y_flipped = True assert ( nx_detector_3.transformations.to_nx_dict() == nx_detector_4.transformations.to_nx_dict() ) # having both lr and ud nx_detector_5 = build_detector() nx_detector_6 = build_detector() nx_detector_5.transformations.add_transformation( Transformation( axis_name="ry", value=180, transformation_type="rotation", vector=TransformationAxis.AXIS_Y, ) ) nx_detector_5.transformations.add_transformation( Transformation( axis_name="rz", value=180, transformation_type="rotation", vector=TransformationAxis.AXIS_Z, ) ) nx_detector_6.x_flipped = True nx_detector_6.y_flipped = True assert ( nx_detector_5.transformations.to_nx_dict() == nx_detector_6.transformations.to_nx_dict() ) def test_several_nxtransformations(tmp_path): """ try loading NXtransformations """ file_path = str(tmp_path / "test_transformations.nx") nx_detector = NXdetector(expected_dim=(2, 3)) nx_detector.data = numpy.random.random(100 * 100 * 5).reshape(5, 100, 100) nx_detector.image_key_control = [1] * 5 nx_detector.image_key_control = [ImageKey.PROJECTION] * 5 nx_detector.transformations.add_transformation(DetZFlipTransformation(flip=True)) nx_detector.save(file_path=file_path, data_path="detector", nexus_path_version=1.3) # test 1: one detector with one NXtransformations stored at the default location load_det = NXdetector() load_det._load( file_path=file_path, data_path="detector", 
load_data_as="as_numpy_array", nexus_version=1.3, ) assert ( len(load_det.transformations.transformations) == 2 ) # the DetZFlipTransformation + gravity # test2: two transformations - one stored at the default location with h5py.File(file_path, mode="a") as h5f: assert "detector/transformations" in h5f h5f["detector"].copy(source="transformations", dest="new_transformations") load_det = NXdetector() load_det._load( file_path=file_path, data_path="detector", load_data_as="as_numpy_array", nexus_version=1.3, ) assert ( len(load_det.transformations.transformations) == 2 ) # the DetZFlipTransformation + gravity # test3: two transformations - none at the default location with h5py.File(file_path, mode="a") as h5f: assert "detector/transformations" in h5f h5f["detector"].move(source="transformations", dest="new_new_transformations") load_det = NXdetector() with pytest.raises(ValueError): load_det._load( file_path=file_path, data_path="detector", load_data_as="as_numpy_array", nexus_version=1.3, ) # test4: one transformation - not stored at the default location with h5py.File(file_path, mode="a") as h5f: del h5f["detector/new_new_transformations"] load_det = NXdetector() load_det._load( file_path=file_path, data_path="detector", load_data_as="as_numpy_array", nexus_version=1.3, ) assert ( len(load_det.transformations.transformations) == 2 ) # the DetZFlipTransformation + gravity def test_detector_flips(tmp_path): """ Make sure the 'deprecated' API 'x_flip' and 'y_flip' are working """ # build some default detector nx_detector = NXdetector(expected_dim=(2, 3)) nx_detector.data = numpy.random.random(100 * 100 * 5).reshape(5, 100, 100) nx_detector.image_key_control = [1] * 5 nx_detector.image_key_control = [ImageKey.PROJECTION] * 5 nx_detector.y_flipped = True assert ( DetZFlipTransformation(flip=True) not in nx_detector.transformations.transformations ) assert ( DetYFlipTransformation(flip=True) in nx_detector.transformations.transformations ) nx_detector.x_flipped = True assert ( DetZFlipTransformation(flip=True) in nx_detector.transformations.transformations ) nx_detector.x_flipped = False assert ( DetZFlipTransformation(flip=True) not in nx_detector.transformations.transformations ) file_path = os.path.join(tmp_path, "test_nx_detectors") nx_detector.save(file_path=file_path, data_path="detector") loaded_nx_detector = NXdetector() loaded_nx_detector._load( file_path=file_path, data_path="detector", load_data_as="as_numpy_array", nexus_version=1.3, ) assert len(loaded_nx_detector.transformations) == 3 assert loaded_nx_detector.x_flipped is False assert loaded_nx_detector.y_flipped is True ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1727757728.0 nxtomo-1.3.0.dev9/nxtomo/nxobject/tests/test_nxinstrument.py0000644000175000017500000000246514676676640023531 0ustar00paynopaynoimport pytest from nxtomo.nxobject.nxdetector import NXdetector from nxtomo.nxobject.nxinstrument import NXinstrument from nxtomo.nxobject.nxsource import DefaultESRFSource, NXsource def test_nx_instrument(): """test creation and saving of an nxinstrument""" nx_instrument = NXinstrument() # check data with pytest.raises(TypeError): nx_instrument.detector = 12 nx_instrument.detector = NXdetector(node_name="test") with pytest.raises(TypeError): nx_instrument.diode = 12 nx_instrument.diode = NXdetector(node_name="test 2") with pytest.raises(TypeError): nx_instrument.source = 12 nx_instrument.source = DefaultESRFSource() with pytest.raises(TypeError): nx_instrument.diode = NXsource(node_name="my 
source") nx_instrument.diode = NXdetector(node_name="det34") assert isinstance(nx_instrument.to_nx_dict(), dict) with pytest.raises(TypeError): nx_instrument.name = 12 nx_instrument.name = "test name" assert nx_instrument.name == "test name" # check we can't set undefined attributes with pytest.raises(AttributeError): nx_instrument.test = 12 # test concatenation nx_instrument_concat = NXinstrument.concatenate([nx_instrument, nx_instrument]) assert nx_instrument_concat.name == "test name" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1727757728.0 nxtomo-1.3.0.dev9/nxtomo/nxobject/tests/test_nxmonitor.py0000644000175000017500000000206514676676640023004 0ustar00paynopaynoimport numpy import pytest from nxtomo.nxobject.utils import concatenate from nxtomo.nxobject.nxmonitor import NXmonitor def test_nx_sample(): """test creation and saving of an nxsource""" nx_monitor = NXmonitor() # check name with pytest.raises(TypeError): nx_monitor.data = 12 with pytest.raises(ValueError): nx_monitor.data = numpy.zeros([12, 12]) nx_monitor.data = tuple() nx_monitor.data = numpy.zeros(12) assert isinstance(nx_monitor.to_nx_dict(), dict) # test concatenate nx_monitor_1 = NXmonitor() nx_monitor_1.data = numpy.arange(10) nx_monitor_2 = NXmonitor() nx_monitor_2.data = numpy.arange(10)[::-1] nx_monitor_2.data.unit = "mA" nx_monitor_concat = concatenate([nx_monitor_1, nx_monitor_2]) assert isinstance(nx_monitor_concat, NXmonitor) numpy.testing.assert_array_equal( nx_monitor_concat.data.value, numpy.concatenate( [ nx_monitor_1.data.value, nx_monitor_2.data.value * 10e-4, ] ), ) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1727757728.0 nxtomo-1.3.0.dev9/nxtomo/nxobject/tests/test_nxobject.py0000644000175000017500000000522314676676640022562 0ustar00paynopaynofrom __future__ import annotations import os from tempfile import TemporaryDirectory import numpy import pytest import pyunitsystem as unitsystem from nxtomo.nxobject.nxobject import ElementWithUnit, NXobject class test_nx_object: """Tets API of the nx object""" with pytest.raises(TypeError): NXobject(node_name=12) with pytest.raises(TypeError): NXobject(node_name="test", parent=12) nx_object = NXobject(node_name="NXobject") with pytest.raises(NotImplementedError): nx_object.to_nx_dict(nexus_path_version=1.0) assert nx_object.is_root is True with pytest.raises(TypeError): nx_object.node_name = 12 with pytest.raises(AttributeError): nx_object.test = 12 class MyNXObject(NXobject): def to_nx_dict( self, nexus_path_version: float | None = None, data_path: str | None = None, ) -> dict: return { f"{self.path}/test": "toto", } my_nx_object = MyNXObject(node_name="NxObject2") with TemporaryDirectory() as folder: file_path = os.path.join(folder, "my_nexus.nx") assert not os.path.exists(file_path) my_nx_object.save( file_path=file_path, data_path="/object", nexus_path_version=1.0 ) assert os.path.exists(file_path) with pytest.raises(KeyError): my_nx_object.save( file_path=file_path, data_path="/object", nexus_path_version=1.0, overwrite=False, ) my_nx_object.save( file_path=file_path, data_path="/object", nexus_path_version=1.0, overwrite=True, ) def test_ElementWithUnit(): """test the ElementWithUnit class""" elmt = ElementWithUnit(default_unit=unitsystem.MetricSystem.METER) elmt.value = 12.3 assert elmt.si_value == 12.3 elmt.unit = "cm" assert numpy.isclose(elmt.si_value, 0.123) with pytest.raises(TypeError): ElementWithUnit(default_unit=None) elmt = 
ElementWithUnit(default_unit=unitsystem.EnergySI.KILOELECTRONVOLT) elmt.value = 12.3 assert elmt.si_value == 12.3 * unitsystem.EnergySI.KILOELECTRONVOLT.value elmt.unit = "J" assert elmt.si_value == 12.3 str(elmt) assert str(elmt) == "12.3 J" elmt = ElementWithUnit(default_unit=unitsystem.TimeSystem.SECOND) elmt.value = 8.0 assert elmt.si_value == 8.0 elmt.unit = "minute" elmt.si_value == 8.0 / 60.0 str(elmt) with pytest.raises(ValueError): elmt.unit = "not minute" with pytest.raises(TypeError): elmt.unit = 123 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1727757728.0 nxtomo-1.3.0.dev9/nxtomo/nxobject/tests/test_nxsample.py0000644000175000017500000000403414676676640022574 0ustar00paynopaynoimport numpy import pytest from nxtomo.nxobject.nxsample import NXsample def test_nx_sample(): """test creation and saving of an nxsource""" nx_sample = NXsample() # check name with pytest.raises(TypeError): nx_sample.name = 12 nx_sample.name = "my sample" # check rotation angle with pytest.raises(TypeError): nx_sample.rotation_angle = 56 nx_sample.rotation_angle = numpy.linspace(0, 180, 180, endpoint=False) # check x translation with pytest.raises(TypeError): nx_sample.x_translation = 56 nx_sample.x_translation = numpy.linspace(0, 180, 180, endpoint=False) # check y translation with pytest.raises(TypeError): nx_sample.y_translation = 56 nx_sample.y_translation = [0.0] * 180 # check z translation with pytest.raises(TypeError): nx_sample.x_translation = 56 nx_sample.z_translation = None assert isinstance(nx_sample.to_nx_dict(), dict) # check we can't set undefined attributes with pytest.raises(AttributeError): nx_sample.test = 12 # test concatenation nx_sample_concat = NXsample.concatenate([nx_sample, nx_sample]) assert nx_sample_concat.name == "my sample" numpy.testing.assert_array_equal( nx_sample_concat.rotation_angle, numpy.concatenate( [ numpy.linspace(0, 180, 180, endpoint=False), numpy.linspace(0, 180, 180, endpoint=False), ] ), ) numpy.testing.assert_array_equal( nx_sample_concat.x_translation.value, numpy.concatenate( [ numpy.linspace(0, 180, 180, endpoint=False), numpy.linspace(0, 180, 180, endpoint=False), ] ), ) numpy.testing.assert_array_equal( nx_sample_concat.y_translation.value, numpy.concatenate( [ numpy.asarray([0.0] * 180), numpy.asarray([0.0] * 180), ] ), ) assert nx_sample_concat.z_translation.value is None ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1729761091.0 nxtomo-1.3.0.dev9/nxtomo/nxobject/tests/test_nxsource.py0000644000175000017500000000237314706407503022577 0ustar00paynopaynoimport pytest from nxtomo.nxobject.nxsource import NXsource def test_nx_source(): """test creation and saving of an nxsource""" nx_source = NXsource() with pytest.raises(TypeError): nx_source.name = 12 nx_source.name = "my source" with pytest.raises(AttributeError): nx_source.source_name = "test" with pytest.raises(ValueError): nx_source.type = "toto" nx_source.type = "Synchrotron X-ray Source" str(nx_source) nx_source.type = None str(nx_source) assert nx_source.probe is None nx_source.probe = "neutron" assert nx_source.probe.value == "neutron" with pytest.raises(ValueError): nx_source.probe = 12 assert nx_source.distance.value is None nx_source.distance = 12.6 with pytest.raises(ValueError): nx_source.distance = "ddsad" assert isinstance(nx_source.to_nx_dict(), dict) # check we can't set undefined attributes with pytest.raises(AttributeError): nx_source.test = 12 # test some concatenation nx_source_concatenate = 
NXsource.concatenate([nx_source, nx_source]) assert nx_source_concatenate.name == "my source" assert nx_source_concatenate.type is None assert nx_source_concatenate.probe.value == "neutron" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1727757728.0 nxtomo-1.3.0.dev9/nxtomo/nxobject/tests/test_nxtransformations.py0000644000175000017500000001255714676676640024555 0ustar00paynopaynoimport numpy import pytest from nxtomo.nxobject.nxtransformations import NXtransformations from nxtomo.utils.transformation import ( Transformation, TransformationAxis, GravityTransformation, ) def test_nx_transforamtions(tmp_path): """test creation and saving of an NXtransformations""" nx_transformations_1 = NXtransformations() with pytest.raises(TypeError): nx_transformations_1.transformations = 12 with pytest.raises(TypeError): nx_transformations_1.transformations = {12: 12} translation_along_x = Transformation( axis_name="tx", value=9.6, transformation_type="translation", vector=TransformationAxis.AXIS_X, ) nx_transformations_1.add_transformation( transformation=translation_along_x, ) rotation_along_z = Transformation( axis_name="rz", value=90, transformation_type="rotation", vector=TransformationAxis.AXIS_Z, ) rotation_along_z.offset = (12.0, 0, 0) assert numpy.array_equal(rotation_along_z.offset, numpy.array([12.0, 0, 0])) rotation_along_z.units = "degree" rotation_along_z.depends_on = "tx" assert rotation_along_z.depends_on == "tx" with pytest.raises(AttributeError): rotation_along_z.vector = TransformationAxis.AXIS_Z assert rotation_along_z.vector == (0, 0, 1) nx_transformations_1.add_transformation( rotation_along_z, ) assert len(nx_transformations_1.transformations) == 2 assert nx_transformations_1.to_nx_dict(data_path="") == { # ty specifics "tx": 9.6, "tx@transformation_type": "translation", "tx@units": "m", "tx@vector": (1, 0, 0), "tx@offset": (0, 0, 0), # tx specifics "rz": 90, "rz@depends_on": "tx", "rz@offset": (12.0, 0, 0), "rz@transformation_type": "rotation", "rz@units": "degree", "rz@vector": (0, 0, 1), # class attributes "@NX_class": "NX_transformations", "@units": "NX_TRANSFORMATION", } # check solving empty dependancy assert nx_transformations_1.to_nx_dict( data_path="", solve_empty_dependency=True ) == { # ty specifics "tx": 9.6, "tx@transformation_type": "translation", "tx@units": "m", "tx@vector": (1, 0, 0), "tx@offset": (0, 0, 0), "tx@depends_on": "gravity", # tx specifics "rz": 90, "rz@depends_on": "tx", "rz@offset": (12.0, 0, 0), "rz@transformation_type": "rotation", "rz@units": "degree", "rz@vector": (0, 0, 1), # gravity "gravity": numpy.nan, "gravity@offset": (0, 0, 0), "gravity@transformation_type": "translation", "gravity@units": "m/s2", "gravity@vector": (0, 0, -1), # class attributes "@NX_class": "NX_transformations", "@units": "NX_TRANSFORMATION", } nx_transformations_2 = NXtransformations() nx_transformations_2.transformations = ( Transformation("rx", 60, "rotation", vector=TransformationAxis.AXIS_X), Transformation("rz", -60, "rotation", vector=TransformationAxis.AXIS_Z), ) assert NXtransformations.concatenate( [nx_transformations_2, nx_transformations_1] ).transformations == ( Transformation("rx", 60, "rotation", vector=TransformationAxis.AXIS_X), Transformation("rz", -60, "rotation", vector=TransformationAxis.AXIS_Z), translation_along_x, ) assert NXtransformations.concatenate( [nx_transformations_1, nx_transformations_2] ).transformations != ( translation_along_x, Transformation("rx", 60, "rotation", vector=TransformationAxis.AXIS_X), 
Transformation("rz", -60, "rotation", vector=TransformationAxis.AXIS_Z), ) # save NXtransformation to file and load it output_file_path = str(tmp_path / "test_nxtransformations.nx") nx_transformations_2.save(output_file_path, "transformations") assert len(nx_transformations_2.transformations) == 2 # test backward compatibility loaded_transformations = NXtransformations()._load( output_file_path, "transformations", 1.2 ) assert isinstance(loaded_transformations, NXtransformations) assert len(loaded_transformations.transformations) == 0 # test backward compatibility loaded_transformations = NXtransformations()._load( output_file_path, "transformations", 1.3 ) assert isinstance(loaded_transformations, NXtransformations) assert len(loaded_transformations.transformations) == 2 assert loaded_transformations == nx_transformations_2 # check that Gravity will not affect the equality nx_transformations_2.add_transformation(GravityTransformation()) assert loaded_transformations == nx_transformations_2 loaded_transformations.add_transformation(GravityTransformation()) assert loaded_transformations == nx_transformations_2 output_file_path_2 = str(tmp_path / "test_nxtransformations.nx") nx_transformations_2.save(output_file_path_2, "/entry/toto/transformations") loaded_transformations = NXtransformations()._load( output_file_path_2, "/entry/toto/transformations", 1.3 ) assert isinstance(loaded_transformations, NXtransformations) assert len(loaded_transformations.transformations) == 3 assert loaded_transformations == nx_transformations_2 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1723572514.0 nxtomo-1.3.0.dev9/nxtomo/nxobject/utils.py0000644000175000017500000000136114656720442017670 0ustar00paynopayno""" utils for NXobject """ from typing import Iterable from nxtomo.nxobject.nxobject import NXobject def concatenate(nx_objects: Iterable, **kwargs) -> NXobject: """ concatenate a list of NXobjects :param Iterable nx_objects: objects to be concatenated. They are expected to be of the same type. :param kwargs: extra parameters :return: concatenated object. 
Of the same type of 'nx_objects' :rtype: :class:`~nxtomo.nxobject.nxobject.NXobject` """ if len(nx_objects) == 0: return None else: if not isinstance(nx_objects[0], NXobject): raise TypeError("nx_objects are expected to be instances of NXobject") return type(nx_objects[0]).concatenate(nx_objects=nx_objects, **kwargs) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1729922990.4957538 nxtomo-1.3.0.dev9/nxtomo/paths/0000755000175000017500000000000014707103656015457 5ustar00paynopayno././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1710441677.0 nxtomo-1.3.0.dev9/nxtomo/paths/__init__.py0000644000175000017500000000007614574642315017575 0ustar00paynopayno"""paths of the different components in nxtomo application""" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1729921497.0 nxtomo-1.3.0.dev9/nxtomo/paths/nxdetector.py0000644000175000017500000000334314707100731020202 0ustar00paynopayno"""nexus path used to define a `NXdetector `_""" class NEXUS_DETECTOR_PATH: DATA = "data" IMAGE_KEY_CONTROL = "image_key_control" IMAGE_KEY = "image_key" X_PIXEL_SIZE = "x_pixel_size" Y_PIXEL_SIZE = "y_pixel_size" X_PIXEL_SIZE_MAGNIFIED = "x_magnified_pixel_size" Y_PIXEL_SIZE_MAGNIFIED = "y_magnified_pixel_size" X_REAL_PIXEL_SIZE = "real_x_pixel_size" Y_REAL_PIXEL_SIZE = "real_y_pixel_size" MAGNIFICATION = "magnification" DISTANCE = "distance" FOV = "field_of_view" ESTIMATED_COR_FRM_MOTOR = "estimated_cor_from_motor" "warning: replace by Y_ROTATION_AXIS_PIXEL_POSITION" ROI = "roi" EXPOSURE_TIME = "count_time" X_FLIPPED = "x_flipped" Y_FLIPPED = "y_flipped" NX_TRANSFORMATIONS = None # path in the NXdetector where are store the transformations X_ROTATION_AXIS_PIXEL_POSITION = None Y_ROTATION_AXIS_PIXEL_POSITION = None class NEXUS_DETECTOR_PATH_V_1_0(NEXUS_DETECTOR_PATH): pass class NEXUS_DETECTOR_PATH_V_1_1(NEXUS_DETECTOR_PATH): pass class NEXUS_DETECTOR_PATH_V_1_2(NEXUS_DETECTOR_PATH_V_1_1): pass class NEXUS_DETECTOR_PATH_V_1_3(NEXUS_DETECTOR_PATH_V_1_2): # in this version we expect `x_flipped`, `y_flipped` to be replaced by ̀TRANSFORMATIONS` NXtransformations group NX_TRANSFORMATIONS = "transformations" X_FLIPPED = None Y_FLIPPED = None class NEXUS_DETECTOR_PATH_V_1_4(NEXUS_DETECTOR_PATH_V_1_3): ESTIMATED_COR_FRM_MOTOR = None # replaced by 'X_ROTATION_AXIS_PIXEL_POSITION' X_ROTATION_AXIS_PIXEL_POSITION = "x_rotation_axis_pixel_position" Y_ROTATION_AXIS_PIXEL_POSITION = "y_rotation_axis_pixel_position" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1729761091.0 nxtomo-1.3.0.dev9/nxtomo/paths/nxinstrument.py0000644000175000017500000000125014706407503020602 0ustar00paynopayno"""nexus path used to define a `NXinstrument `_""" class NEXUS_INSTRUMENT_PATH: DETECTOR_PATH = "detector" DIODE = None SOURCE = None BEAM = None NAME = None class NEXUS_INSTRUMENT_PATH_V_1_0(NEXUS_INSTRUMENT_PATH): pass class NEXUS_INSTRUMENT_PATH_V_1_1(NEXUS_INSTRUMENT_PATH_V_1_0): SOURCE = "source" BEAM = "beam" NAME = "name" class NEXUS_INSTRUMENT_PATH_V_1_2(NEXUS_INSTRUMENT_PATH_V_1_1): DIODE = "diode" class NEXUS_INSTRUMENT_PATH_V_1_3(NEXUS_INSTRUMENT_PATH_V_1_2): pass class NEXUS_INSTRUMENT_PATH_V_1_4(NEXUS_INSTRUMENT_PATH_V_1_3): pass ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1729761091.0 nxtomo-1.3.0.dev9/nxtomo/paths/nxmonitor.py0000644000175000017500000000077214706407503020071 0ustar00paynopayno"""nexus path used to define a `NXmonitor `_""" class NEXUS_MONITOR_PATH: 
DATA_PATH = "data" class NEXUS_MONITOR_PATH_V_1_0(NEXUS_MONITOR_PATH): pass class NEXUS_MONITOR_PATH_V_1_1(NEXUS_MONITOR_PATH_V_1_0): pass class NEXUS_MONITOR_PATH_V_1_2(NEXUS_MONITOR_PATH_V_1_1): pass class NEXUS_MONITOR_PATH_V_1_3(NEXUS_MONITOR_PATH_V_1_2): pass class NEXUS_MONITOR_PATH_V_1_4(NEXUS_MONITOR_PATH_V_1_3): pass ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1729761091.0 nxtomo-1.3.0.dev9/nxtomo/paths/nxsample.py0000644000175000017500000000176014706407503017661 0ustar00paynopayno"""nexus path used to define a `NXsample `_""" from . import nxtransformations class NEXUS_SAMPLE_PATH: NAME = "sample_name" ROTATION_ANGLE = "rotation_angle" X_TRANSLATION = "x_translation" Y_TRANSLATION = "y_translation" Z_TRANSLATION = "z_translation" ROCKING = "rocking" BASE_TILT = "base_tilt" N_STEPS_ROCKING = "n_step_rocking" N_STEPS_ROTATION = "n_step_rotation" NX_TRANSFORMATIONS = None NX_TRANSFORMATIONS_PATHS = None class NEXUS_SAMPLE_PATH_V_1_0(NEXUS_SAMPLE_PATH): pass class NEXUS_SAMPLE_PATH_V_1_1(NEXUS_SAMPLE_PATH_V_1_0): NAME = "name" class NEXUS_SAMPLE_PATH_V_1_2(NEXUS_SAMPLE_PATH_V_1_1): pass class NEXUS_SAMPLE_PATH_V_1_3(NEXUS_SAMPLE_PATH_V_1_2): NX_TRANSFORMATIONS = "transformations" NX_TRANSFORMATIONS_PATHS = nxtransformations.NEXUS_TRANSFORMATIONS_PATH_V_1_3 class NEXUS_SAMPLE_PATH_V_1_4(NEXUS_SAMPLE_PATH_V_1_3): pass ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1729761091.0 nxtomo-1.3.0.dev9/nxtomo/paths/nxsource.py0000644000175000017500000000106314706407503017674 0ustar00paynopayno"""nexus path used to define a `NXsource `_""" class NEXUS_SOURCE_PATH: NAME = "name" TYPE = "type" PROBE = "probe" DISTANCE = None class NEXUS_SOURCE_PATH_V_1_0(NEXUS_SOURCE_PATH): pass class NEXUS_SOURCE_PATH_V_1_1(NEXUS_SOURCE_PATH_V_1_0): pass class NEXUS_SOURCE_PATH_V_1_2(NEXUS_SOURCE_PATH_V_1_1): pass class NEXUS_SOURCE_PATH_V_1_3(NEXUS_SOURCE_PATH_V_1_2): pass class NEXUS_SOURCE_PATH_V_1_4(NEXUS_SOURCE_PATH_V_1_3): DISTANCE = "distance" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1729921562.0 nxtomo-1.3.0.dev9/nxtomo/paths/nxtomo.py0000644000175000017500000003112614707101032017342 0ustar00paynopayno"""nexus path used to define a `NXtomo `_""" from __future__ import annotations import logging from nxtomo.utils.io import deprecated import nxtomo from nxtomo.paths import ( nxdetector, nxinstrument, nxmonitor, nxsample, nxsource, nxtransformations, ) _logger = logging.getLogger(__name__) LATEST_VERSION = 1.4 class NXtomo_PATH: # list all path that can be used by an nxtomo entry and read by nxtomo. 
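# --- illustrative sketch (not part of the library sources) ---
# The nxtomo.paths.* modules above version the file layout by
# subclassing: each NEXUS_*_PATH_V_x_y class only overrides what changed
# in that version, with None meaning 'not written for this version'.
from nxtomo.paths import nxdetector, nxsource

assert nxsource.NEXUS_SOURCE_PATH_V_1_3.DISTANCE is None  # introduced in 1.4
assert nxsource.NEXUS_SOURCE_PATH_V_1_4.DISTANCE == "distance"
assert nxdetector.NEXUS_DETECTOR_PATH_V_1_3.X_FLIPPED is None  # replaced by NXtransformations in 1.3
# --- end of sketch ---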
    # this is also used by nxtomomill to know where to save data

    _NX_DETECTOR_PATHS = None
    _NX_INSTRUMENT_PATHS = None
    _NX_SAMPLE_PATHS = None
    _NX_SOURCE_PATHS = None
    _NX_CONTROL_PATHS = None
    _NX_TRANSFORMATIONS_PATHS = None  # paths used per each transformation contained in NX_TRANSFORMATIONS

    VERSION = None

    @property
    def nx_detector_paths(self):
        return self._NX_DETECTOR_PATHS

    @property
    def nx_instrument_paths(self):
        return self._NX_INSTRUMENT_PATHS

    @property
    def nx_sample_paths(self):
        return self._NX_SAMPLE_PATHS

    @property
    def nx_source_paths(self):
        return self._NX_SOURCE_PATHS

    @property
    def nx_monitor_paths(self):
        return self._NX_CONTROL_PATHS

    @property
    def nx_transformations_paths(self):
        return self._NX_TRANSFORMATIONS_PATHS

    @property
    def PROJ_PATH(self) -> str:
        return "/".join(
            [
                self.INSTRUMENT_PATH,
                self.nx_instrument_paths.DETECTOR_PATH,
                self.nx_detector_paths.DATA,
            ]
        )

    @property
    def SCAN_META_PATH(self) -> str:
        # for now scan_meta and technique are not linked to any NXtomo...
        return "scan_meta/technique/scan"

    @property
    def INSTRUMENT_PATH(self) -> str:
        return "instrument"

    @property
    def CONTROL_PATH(self) -> str:
        return "control"

    @property
    def DET_META_PATH(self) -> str:
        return "scan_meta/technique/detector"

    @property
    def ROTATION_ANGLE_PATH(self):
        return "/".join(["sample", self.nx_sample_paths.ROTATION_ANGLE])

    @property
    def SAMPLE_PATH(self) -> str:
        return "sample"

    @property
    def NAME_PATH(self) -> str:
        return "sample/name"

    @property
    def GRP_SIZE_ATTR(self) -> str:
        return "group_size"

    @property
    def SAMPLE_NAME_PATH(self) -> str:
        return "/".join([self.SAMPLE_PATH, self.nx_sample_paths.NAME])

    @property
    def X_TRANS_PATH(self) -> str:
        return "/".join([self.SAMPLE_PATH, self.nx_sample_paths.X_TRANSLATION])

    @property
    def Y_TRANS_PATH(self) -> str:
        return "/".join([self.SAMPLE_PATH, self.nx_sample_paths.Y_TRANSLATION])

    @property
    def Z_TRANS_PATH(self) -> str:
        return "/".join([self.SAMPLE_PATH, self.nx_sample_paths.Z_TRANSLATION])

    @property
    def IMG_KEY_PATH(self) -> str:
        return "/".join(
            [
                self.INSTRUMENT_PATH,
                self.nx_instrument_paths.DETECTOR_PATH,
                self.nx_detector_paths.IMAGE_KEY,
            ]
        )

    @property
    def IMG_KEY_CONTROL_PATH(self) -> str:
        return "/".join(
            [
                self.INSTRUMENT_PATH,
                self.nx_instrument_paths.DETECTOR_PATH,
                self.nx_detector_paths.IMAGE_KEY_CONTROL,
            ]
        )

    @property
    def X_PIXEL_SIZE_PATH(self) -> str:
        return "/".join(
            [
                self.INSTRUMENT_PATH,
                self.nx_instrument_paths.DETECTOR_PATH,
                self.nx_detector_paths.X_PIXEL_SIZE,
            ]
        )

    @property
    def Y_PIXEL_SIZE_PATH(self) -> str:
        return "/".join(
            [
                self.INSTRUMENT_PATH,
                self.nx_instrument_paths.DETECTOR_PATH,
                self.nx_detector_paths.Y_PIXEL_SIZE,
            ]
        )

    @property
    def X_REAL_PIXEL_SIZE_PATH(self) -> str:
        return "/".join(
            [
                self.INSTRUMENT_PATH,
                self.nx_instrument_paths.DETECTOR_PATH,
                self.nx_detector_paths.X_REAL_PIXEL_SIZE,
            ]
        )

    @property
    def Y_REAL_PIXEL_SIZE_PATH(self) -> str:
        return "/".join(
            [
                self.INSTRUMENT_PATH,
                self.nx_instrument_paths.DETECTOR_PATH,
                self.nx_detector_paths.Y_REAL_PIXEL_SIZE,
            ]
        )

    @property
    @deprecated(replacement="X_PIXEL_SIZE_PATH", since_version="1.1.0")
    def X_PIXEL_MAG_SIZE_PATH(self) -> str:
        return "/".join(
            [
                self.INSTRUMENT_PATH,
                self.nx_instrument_paths.DETECTOR_PATH,
                self.nx_detector_paths.X_PIXEL_SIZE_MAGNIFIED,
            ]
        )

    @property
    @deprecated(replacement="Y_PIXEL_SIZE_PATH", since_version="1.1.0")
    def Y_PIXEL_MAG_SIZE_PATH(self) -> str:
        return "/".join(
            [
                self.INSTRUMENT_PATH,
                self.nx_instrument_paths.DETECTOR_PATH,
                self.nx_detector_paths.Y_PIXEL_SIZE_MAGNIFIED,
            ]
        )

    @property
    def DISTANCE_PATH(self) -> str:
        return "/".join(
            [
                self.INSTRUMENT_PATH,
                self.nx_instrument_paths.DETECTOR_PATH,
                self.nx_detector_paths.DISTANCE,
            ]
        )

    @property
    def FOV_PATH(self) -> str:
        return "/".join(
            [
                self.INSTRUMENT_PATH,
                self.nx_instrument_paths.DETECTOR_PATH,
                self.nx_detector_paths.FOV,
            ]
        )

    @property
    def EXPOSURE_TIME_PATH(self) -> str:
        return "/".join(
            [
                self.INSTRUMENT_PATH,
                self.nx_instrument_paths.DETECTOR_PATH,
                self.nx_detector_paths.EXPOSURE_TIME,
            ]
        )

    @property
    def ELECTRIC_CURRENT_PATH(self) -> str:
        return "/".join(
            [
                self.CONTROL_PATH,
                self.nx_monitor_paths.DATA_PATH,
            ]
        )

    @property
    @deprecated(
        replacement="nexuspaths.INSTRUMENT_PATH/nexuspath.nx_instrument_paths.DETECTOR_PATH/nexuspaths.detector_paths.Y_ROTATION_AXIS_PIXEL_POSITION",
        reason="typo",
        since_version="0.8.0",
    )
    def ESTIMATED_COR_FRM_MOTOR_PATH(self) -> str:
        return "/".join(
            [
                self.INSTRUMENT_PATH,
                self.nx_instrument_paths.DETECTOR_PATH,
                self.nx_detector_paths.Y_ROTATION_AXIS_PIXEL_POSITION,
            ]
        )

    @property
    def TOMO_N_SCAN(self) -> str:
        return "/".join(
            [self.INSTRUMENT_PATH, self.nx_instrument_paths.DETECTOR_PATH, "tomo_n"]
        )

    @property
    def BEAM_PATH(self) -> str:
        return "beam"

    @property
    def ENERGY_PATH(self) -> str:
        return f"{self.BEAM_PATH}/incident_energy"

    @property
    def START_TIME_PATH(self) -> str:
        return "start_time"

    @property
    def END_TIME_PATH(self) -> str:
        return "end_time"

    @property
    @deprecated(replacement="END_TIME_PATH", reason="typo", since_version="0.8.0")
    def END_TIME_START(self) -> str:
        return self.END_TIME_PATH

    @property
    def INTENSITY_MONITOR_PATH(self) -> str:
        return "diode/data"

    @property
    @deprecated(
        replacement="", reason="will be removed. Not used", since_version="0.8.0"
    )
    def EPSILON_ROT_ANGLE(self) -> float:
        return 0.02

    @property
    def SOURCE_NAME(self) -> str | None:
        return None

    @property
    def SOURCE_TYPE(self) -> str | None:
        return None

    @property
    def SOURCE_PROBE(self) -> str | None:
        return None

    @property
    def INSTRUMENT_NAME(self) -> str | None:
        return None

    @property
    @deprecated(
        replacement="", reason="will be removed. Not used", since_version="1.3.0"
    )
    def ROCKING_PATH(self) -> str:
        return "/".join([self.SAMPLE_PATH, self.nx_sample_paths.ROCKING])

    @property
    @deprecated(
        replacement="", reason="will be removed. Not used", since_version="1.3.0"
    )
    def BASE_TILT_PATH(self) -> str:
        return "/".join([self.SAMPLE_PATH, self.nx_sample_paths.BASE_TILT])


class NXtomo_PATH_v_1_0(NXtomo_PATH):
    VERSION = 1.0

    _NX_DETECTOR_PATHS = nxdetector.NEXUS_DETECTOR_PATH_V_1_0
    _NX_INSTRUMENT_PATHS = nxinstrument.NEXUS_INSTRUMENT_PATH_V_1_0
    _NX_SAMPLE_PATHS = nxsample.NEXUS_SAMPLE_PATH_V_1_0
    _NX_SOURCE_PATHS = nxsource.NEXUS_SOURCE_PATH_V_1_0
    _NX_CONTROL_PATHS = nxmonitor.NEXUS_MONITOR_PATH_V_1_1


nx_tomo_path_v_1_0 = NXtomo_PATH_v_1_0()


class NXtomo_PATH_v_1_1(NXtomo_PATH_v_1_0):
    VERSION = 1.1

    _NX_DETECTOR_PATHS = nxdetector.NEXUS_DETECTOR_PATH_V_1_1
    _NX_INSTRUMENT_PATHS = nxinstrument.NEXUS_INSTRUMENT_PATH_V_1_1
    _NX_SAMPLE_PATHS = nxsample.NEXUS_SAMPLE_PATH_V_1_1
    _NX_SOURCE_PATHS = nxsource.NEXUS_SOURCE_PATH_V_1_1

    @property
    def NAME_PATH(self) -> str:
        return "title"

    @property
    def BEAM_PATH(self) -> str:
        return "/".join([self.INSTRUMENT_PATH, self.nx_instrument_paths.BEAM])

    @property
    def SOURCE_NAME(self) -> str:
        return "/".join(
            [
                self.INSTRUMENT_PATH,
                self.nx_instrument_paths.SOURCE,
                self.nx_source_paths.NAME,
            ]
        )

    @property
    def SOURCE_TYPE(self) -> str:
        return "/".join(
            [
                self.INSTRUMENT_PATH,
                self.nx_instrument_paths.SOURCE,
                self.nx_source_paths.TYPE,
            ]
        )

    @property
    def SOURCE_PROBE(self) -> str:
        return "/".join(
            [
                self.INSTRUMENT_PATH,
                self.nx_instrument_paths.SOURCE,
                self.nx_source_paths.PROBE,
            ]
        )

    @property
    def INSTRUMENT_NAME(self) -> str:
        return "/".join([self.INSTRUMENT_PATH, self.nx_instrument_paths.NAME])


nx_tomo_path_v_1_1 = NXtomo_PATH_v_1_1()


class NXtomo_PATH_v_1_2(NXtomo_PATH_v_1_1):
    VERSION = 1.2

    _NX_DETECTOR_PATHS = nxdetector.NEXUS_DETECTOR_PATH_V_1_2
    _NX_INSTRUMENT_PATHS = nxinstrument.NEXUS_INSTRUMENT_PATH_V_1_2
    _NX_SAMPLE_PATHS = nxsample.NEXUS_SAMPLE_PATH_V_1_2
    _NX_SOURCE_PATHS = nxsource.NEXUS_SOURCE_PATH_V_1_2

    @property
    def INTENSITY_MONITOR_PATH(self) -> str:
        return "/".join(
            [
                self.INSTRUMENT_PATH,
                self.nx_instrument_paths.DIODE,
                self.nx_detector_paths.DATA,
            ]
        )


nx_tomo_path_v_1_2 = NXtomo_PATH_v_1_2()


class NXtomo_PATH_v_1_3(NXtomo_PATH_v_1_2):
    VERSION = 1.3

    _NX_DETECTOR_PATHS = nxdetector.NEXUS_DETECTOR_PATH_V_1_3
    _NX_INSTRUMENT_PATHS = nxinstrument.NEXUS_INSTRUMENT_PATH_V_1_3
    _NX_SAMPLE_PATHS = nxsample.NEXUS_SAMPLE_PATH_V_1_3
    _NX_SOURCE_PATHS = nxsource.NEXUS_SOURCE_PATH_V_1_3
    _NX_TRANSFORMATIONS_PATHS = nxtransformations.NEXUS_TRANSFORMATIONS_PATH_V_1_3


nx_tomo_path_v_1_3 = NXtomo_PATH_v_1_3()


class NXtomo_PATH_v_1_4(NXtomo_PATH_v_1_3):
    VERSION = 1.4

    _NX_DETECTOR_PATHS = nxdetector.NEXUS_DETECTOR_PATH_V_1_4
    _NX_INSTRUMENT_PATHS = nxinstrument.NEXUS_INSTRUMENT_PATH_V_1_4
    _NX_SAMPLE_PATHS = nxsample.NEXUS_SAMPLE_PATH_V_1_4
    _NX_SOURCE_PATHS = nxsource.NEXUS_SOURCE_PATH_V_1_4
    _NX_TRANSFORMATIONS_PATHS = nxtransformations.NEXUS_TRANSFORMATIONS_PATH_V_1_4


nx_tomo_path_v_1_4 = NXtomo_PATH_v_1_4()

nx_tomo_path_latest = nx_tomo_path_v_1_4


def get_paths(version: float | None) -> NXtomo_PATH:
    if version is None:
        version = LATEST_VERSION
        _logger.warning(
            f"version of the NXtomo not found. Will take the latest one ({LATEST_VERSION})"
        )
    versions_dict = {
        # Ensure compatibility with "old" datasets (acquired before Dec. 2021).
        # nxtomo can still parse them provided that nx_version=1.0 is forced at init.
        0.0: nx_tomo_path_v_1_0,
        0.1: nx_tomo_path_v_1_0,
        1.0: nx_tomo_path_v_1_0,
        1.1: nx_tomo_path_v_1_1,
        1.2: nx_tomo_path_v_1_2,
        1.3: nx_tomo_path_v_1_3,
        1.4: nx_tomo_path_v_1_4,
    }
    if version not in versions_dict:
        if int(version) == 1:
            _logger.warning(
                f"nexus path {version} requested but unknown to this version of nxtomo ({nxtomo.__version__}). Picking the latest one of this major version; some information might be missing"
            )
            version = LATEST_VERSION
        else:
            raise ValueError(f"Unknown major version of the nexus path ({version})")
    return versions_dict[version]


# ===== nxtomo-1.3.0.dev9/nxtomo/paths/nxtransformations.py =====

"""nexus path used to define a `NXtransformations <https://manual.nexusformat.org/classes/base_classes/NXtransformations.html>`_"""


class NEXUS_TRANSFORMATIONS_PATH:
    TRANSFORMATION_TYPE = "@transformation_type"
    VECTOR = "@vector"
    OFFSET = "@offset"
    EQUIPMENT_COMPONENT = "@equipment_component"
    DEPENDS_ON = "@depends_on"


class NEXUS_TRANSFORMATIONS_PATH_V_1_0(NEXUS_TRANSFORMATIONS_PATH):
    pass


class NEXUS_TRANSFORMATIONS_PATH_V_1_1(NEXUS_TRANSFORMATIONS_PATH_V_1_0):
    pass


class NEXUS_TRANSFORMATIONS_PATH_V_1_2(NEXUS_TRANSFORMATIONS_PATH_V_1_1):
    pass


class NEXUS_TRANSFORMATIONS_PATH_V_1_3(NEXUS_TRANSFORMATIONS_PATH_V_1_2):
    pass


class NEXUS_TRANSFORMATIONS_PATH_V_1_4(NEXUS_TRANSFORMATIONS_PATH_V_1_3):
    pass


# ===== nxtomo-1.3.0.dev9/nxtomo/paths/tests/test_backward_compatibility.py =====

# coding: utf-8
# /*##########################################################################
# Copyright (C) 2016-2020 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# #############################################################################
"""test compatibility with previously existing NexusPath classes"""

__authors__ = ["H.Payno"]
__license__ = "MIT"
__date__ = "10/02/2022"


import pytest

from nxtomo.paths.nxtomo import get_paths as new_get_paths
from nxtomo.paths.nxtomo import nx_tomo_path_latest


# classes which were previously defining the paths used to save data as NXtomo,
# from the tomoscan.esrf.scan.nxtomoscan.py file
class _NEXUS_PATHS:
    """Register paths to NXtomo

    The raw values are the ones of the initial version. If a value is None
    then the path did not exist originally.
    """

    PROJ_PATH = "instrument/detector/data"
    SCAN_META_PATH = "scan_meta/technique/scan"
    DET_META_PATH = "scan_meta/technique/detector"
    ROTATION_ANGLE_PATH = "sample/rotation_angle"
    SAMPLE_PATH = "sample"
    NAME_PATH = "sample/name"
    GRP_SIZE_ATTR = "group_size"
    SAMPLE_NAME_PATH = "sample/sample_name"
    X_TRANS_PATH = "sample/x_translation"
    Y_TRANS_PATH = "sample/y_translation"
    Z_TRANS_PATH = "sample/z_translation"
    IMG_KEY_PATH = "instrument/detector/image_key"
    IMG_KEY_CONTROL_PATH = "instrument/detector/image_key_control"
    X_PIXEL_SIZE_PATH = "instrument/detector/x_pixel_size"
    Y_PIXEL_SIZE_PATH = "instrument/detector/y_pixel_size"
    X_PIXEL_MAG_SIZE_PATH = "instrument/detector/x_magnified_pixel_size"
    Y_PIXEL_MAG_SIZE_PATH = "instrument/detector/y_magnified_pixel_size"
    DISTANCE_PATH = "instrument/detector/distance"
    FOV_PATH = "instrument/detector/field_of_view"
    EXPOSURE_TIME_PATH = "instrument/detector/count_time"
    TOMO_N_SCAN = "instrument/detector/tomo_n"
    ENERGY_PATH = "beam/incident_energy"
    START_TIME_PATH = "start_time"
    END_TIME_START = "end_time"  # typo - deprecated
    END_TIME_PATH = "end_time"
    INTENSITY_MONITOR_PATH = "diode/data"
    EPSILON_ROT_ANGLE = 0.02
    SOURCE_NAME = None
    SOURCE_TYPE = None
    SOURCE_PROBE = None
    INSTRUMENT_NAME = None
    ROCKING_PATH = "sample/rocking"
    BASE_TILT_PATH = "sample/base_tilt"


class _NEXUS_PATHS_V_1_0(_NEXUS_PATHS):
    pass


class _NEXUS_PATHS_V_1_1(_NEXUS_PATHS_V_1_0):
    ENERGY_PATH = "instrument/beam/incident_energy"
    SOURCE_NAME = "instrument/source/name"
    SOURCE_TYPE = "instrument/source/type"
    SOURCE_PROBE = "instrument/source/probe"
    INSTRUMENT_NAME = "instrument/name"
    NAME_PATH = "title"
    SAMPLE_NAME_PATH = "sample/name"


_class_to_compare_versions = {
    1.0: (_NEXUS_PATHS_V_1_0, new_get_paths(1.0)),
    1.1: (_NEXUS_PATHS_V_1_1, new_get_paths(1.1)),
}


@pytest.mark.parametrize("path_version", (1.0, 1.1))
def test_compare_result(path_version):
    """ensure the new way of providing nexus paths does not break the previous API or values"""
    old_class, new_class = _class_to_compare_versions[path_version]
    assert old_class.PROJ_PATH == new_class.PROJ_PATH
    assert old_class.SCAN_META_PATH == new_class.SCAN_META_PATH
    assert old_class.DET_META_PATH == new_class.DET_META_PATH
    assert old_class.ROTATION_ANGLE_PATH == new_class.ROTATION_ANGLE_PATH
    assert old_class.SAMPLE_PATH == new_class.SAMPLE_PATH
    assert old_class.NAME_PATH == new_class.NAME_PATH
    assert old_class.GRP_SIZE_ATTR == new_class.GRP_SIZE_ATTR
    assert old_class.SAMPLE_NAME_PATH == new_class.SAMPLE_NAME_PATH
    assert old_class.X_TRANS_PATH == new_class.X_TRANS_PATH
    assert old_class.Y_TRANS_PATH == new_class.Y_TRANS_PATH
    assert old_class.Z_TRANS_PATH == new_class.Z_TRANS_PATH
    assert old_class.IMG_KEY_PATH == new_class.IMG_KEY_PATH
    assert old_class.IMG_KEY_CONTROL_PATH == new_class.IMG_KEY_CONTROL_PATH
    assert old_class.X_PIXEL_SIZE_PATH == new_class.X_PIXEL_SIZE_PATH
    assert old_class.Y_PIXEL_SIZE_PATH == new_class.Y_PIXEL_SIZE_PATH
    assert old_class.X_PIXEL_MAG_SIZE_PATH == new_class.X_PIXEL_MAG_SIZE_PATH
    assert old_class.Y_PIXEL_MAG_SIZE_PATH == new_class.Y_PIXEL_MAG_SIZE_PATH
    assert old_class.DISTANCE_PATH == new_class.DISTANCE_PATH
    assert old_class.FOV_PATH == new_class.FOV_PATH
    assert old_class.EXPOSURE_TIME_PATH == new_class.EXPOSURE_TIME_PATH
    assert old_class.TOMO_N_SCAN == new_class.TOMO_N_SCAN
    assert old_class.ENERGY_PATH == new_class.ENERGY_PATH
    assert old_class.START_TIME_PATH == new_class.START_TIME_PATH
    assert old_class.END_TIME_START == new_class.END_TIME_START
    assert old_class.END_TIME_PATH == new_class.END_TIME_PATH
    assert old_class.INTENSITY_MONITOR_PATH == new_class.INTENSITY_MONITOR_PATH
    assert old_class.EPSILON_ROT_ANGLE == new_class.EPSILON_ROT_ANGLE
    assert old_class.SOURCE_NAME == new_class.SOURCE_NAME
    assert old_class.SOURCE_TYPE == new_class.SOURCE_TYPE
    assert old_class.INSTRUMENT_NAME == new_class.INSTRUMENT_NAME
    assert old_class.ROCKING_PATH == new_class.ROCKING_PATH
    assert old_class.BASE_TILT_PATH == new_class.BASE_TILT_PATH


def test_unknown_nexus_path_version():
    assert new_get_paths(None) == nx_tomo_path_latest
    assert new_get_paths(1.99) == nx_tomo_path_latest
    with pytest.raises(ValueError):
        assert new_get_paths(-1.0) is None
    with pytest.raises(ValueError):
        assert new_get_paths(2.0) is None


# ===== nxtomo-1.3.0.dev9/nxtomo/utils/__init__.py =====

"""general utils along the project"""

from .utils import *  # noqa F401


# ===== nxtomo-1.3.0.dev9/nxtomo/utils/detectorsplitter.py =====

"""
module to split a NXtomo into several
"""

from __future__ import annotations

import copy
import logging

import h5py
import h5py._hl.selections as selection
import numpy
from silx.io.url import DataUrl
from silx.io.utils import get_data

from nxtomo.application.nxtomo import NXtomo
from nxtomo.utils.io import DatasetReader

_logger = logging.getLogger(__name__)

__all__ = [
    "NXtomoDetectorDataSplitter",
]


class NXtomoDetectorDataSplitter:
    def __init__(self, nx_tomo: NXtomo) -> None:
        """
        Splitter to split the dataset nxtomo.instrument.detector.data into
        several :class:`~nxtomo.nxobject.nxobject.NXobject`. This will also
        keep the rotation_angle, image_key, x_translation... datasets up to
        date.

        In order to start the processing it requires a correctly formed NXtomo
        (same number of image_key, rotation_angle...). This is required for
        the pcotomo acquisitions.

        :param nx_tomo: nx_tomo to be split
        """
        if not isinstance(nx_tomo, NXtomo):
            raise TypeError(
                f"nxtomo is expected to be an instance of {NXtomo} and not {type(nx_tomo)}"
            )
        self._nx_tomo = nx_tomo

    @property
    def nx_tomo(self) -> NXtomo:
        """nx_tomo to be split"""
        return self._nx_tomo

    def split(
        self,
        data_slice: slice,
        nb_part: int | None,
        tomo_n: int | None = None,
    ) -> tuple:
        """
        split the dataset targeted to have a set of h5py.VirtualSource.
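
        A minimal usage sketch (hypothetical values, assuming ``my_nxtomo`` is
        a correctly formed NXtomo holding 100 frames):

            splitter = NXtomoDetectorDataSplitter(my_nxtomo)
            parts = splitter.split(data_slice=slice(0, 100, 1), nb_part=2)
            # 'parts' now contains two NXtomo of 50 frames each
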
        Behavior according to 'nb_part' and 'tomo_n' is the following:

        * if only 'nb_part' is provided it will split the NXtomo into 'nb_part' NXtomos
        * if only 'tomo_n' is provided it will take the first 'tomo_n' projections to create an NXtomo, then the next 'tomo_n'...
        * if both are provided then it will use the 'tomo_n' parameter (since version 1.3), as it works better in case of missing frames in the acquisition

        :param nb_part: into how many contiguous datasets the instrument.detector.data must be split
        :param tomo_n: expected number of projections per NXtomo
        :raises: ValueError if the number of frames, image_key, x_translation... is incoherent
        """
        if nb_part is not None and not isinstance(
            nb_part, (int, type(None), numpy.integer)
        ):
            raise TypeError(f"nb_part is expected to be an int not {type(nb_part)}")
        if tomo_n is not None and not isinstance(
            tomo_n, (int, type(None), numpy.integer)
        ):
            raise TypeError(f"tomo_n is expected to be an int not {type(tomo_n)}")
        invalid_datasets = self.get_invalid_datasets()
        if len(invalid_datasets) > 0:
            _logger.warning(
                f"Some datasets have an incoherent length compared to the nx_tomo.instrument.detector.data length: {invalid_datasets}"
            )

        if data_slice.step not in (1, None):
            raise ValueError("slice step must be one.")
        elif tomo_n is not None:
            assert tomo_n > 0, "invalid value for tomo_n"
            return self._split_from_tomo_n(tomo_n=tomo_n, data_slice=data_slice)
        else:
            if nb_part is None:
                raise ValueError("tomo_n or part_n should be provided. None provided")
            elif nb_part <= 0:
                raise ValueError(f"nb_part is expected to be >=1 not {nb_part}")
            elif nb_part == 1:
                return [
                    self.nx_tomo,
                ]
            elif (data_slice.stop - data_slice.start) % nb_part != 0 or (
                tomo_n is not None
                and ((data_slice.stop - data_slice.start) % nb_part == tomo_n)
            ):
                raise ValueError(
                    f"incoherent split requested. Requested to split {(data_slice.stop - data_slice.start - 1)} slices into {nb_part} parts. The simplest is to provide tomo_n instead"
                )
            else:
                return self._split_from_nb_part(nb_part=nb_part, data_slice=data_slice)

    def _split_from_tomo_n(self, tomo_n: int, data_slice: slice):
        parts = []
        n_slices_remaining = data_slice.stop - data_slice.start - 1
        current_slice = data_slice
        i_part = 0
        while n_slices_remaining > 0:
            new_slice_size = tomo_n
            new_slice = slice(
                current_slice.start + new_slice_size * i_part,
                min(
                    current_slice.start + new_slice_size * (i_part + 1),
                    current_slice.stop,
                ),
                1,
            )
            nx_tomo_part = self.replace(old_slice=data_slice, new_slice=new_slice)
            parts.append(nx_tomo_part)
            n_slices_remaining -= tomo_n
            i_part += 1
        return parts

    def _split_from_nb_part(self, nb_part, data_slice: slice):
        parts = []
        current_slice = data_slice
        for i_part in range(nb_part):
            new_slice_size = (current_slice.stop - current_slice.start) // nb_part
            new_slice = slice(
                current_slice.start + new_slice_size * i_part,
                current_slice.start + new_slice_size * (i_part + 1),
                1,
            )
            nx_tomo_part = self.replace(old_slice=data_slice, new_slice=new_slice)
            parts.append(nx_tomo_part)
        return parts

    def replace(self, old_slice: slice, new_slice: slice) -> NXtomo:
        """
        replace a section of the NXtomo instrument.detector.data by a subsection of it
        """
        if not isinstance(old_slice, slice):
            raise TypeError("old_slice is expected to be a slice")
        if not isinstance(new_slice, slice):
            raise TypeError("new_slice is expected to be a slice")
        if old_slice.step not in (None, 1):
            raise ValueError("old_slice step is expected to be one")
        if new_slice.step not in (None, 1):
            raise ValueError("new_slice step is expected to be one")
        if new_slice.start < old_slice.start or new_slice.stop > old_slice.stop:
            raise ValueError(
                f"new_slice ({new_slice}) must be contained in old_slice ({old_slice})"
            )
        if old_slice.start < 0:
            raise ValueError(
                f"old_slice.start must be at least 0 not {old_slice.start}"
            )
        n_frames = self._get_n_frames()
        if n_frames is not None and old_slice.stop > n_frames:
            raise ValueError(
                f"old_slice.stop must be at most {n_frames} not {old_slice.stop}"
            )
        # handles datasets other than instrument.detector.data
        result_nx_tomo = copy.deepcopy(self.nx_tomo)
        if (
            result_nx_tomo.control
            and result_nx_tomo.control.data is not None
            and result_nx_tomo.control.data.value is not None
        ):
            result_nx_tomo.control.data = numpy.concatenate(
                [
                    self.nx_tomo.control.data.value[: old_slice.start],
                    self.nx_tomo.control.data.value[new_slice],
                    self.nx_tomo.control.data.value[old_slice.stop :],
                ]
            )
        if result_nx_tomo.sample.rotation_angle is not None:
            result_nx_tomo.sample.rotation_angle = numpy.concatenate(
                [
                    self.nx_tomo.sample.rotation_angle[: old_slice.start],
                    self.nx_tomo.sample.rotation_angle[new_slice],
                    self.nx_tomo.sample.rotation_angle[old_slice.stop :],
                ]
            )
        if result_nx_tomo.sample.x_translation.value is not None:
            result_nx_tomo.sample.x_translation.value = numpy.concatenate(
                [
                    self.nx_tomo.sample.x_translation.value[: old_slice.start],
                    self.nx_tomo.sample.x_translation.value[new_slice],
                    self.nx_tomo.sample.x_translation.value[old_slice.stop :],
                ]
            )
        if result_nx_tomo.sample.y_translation.value is not None:
            result_nx_tomo.sample.y_translation.value = numpy.concatenate(
                [
                    self.nx_tomo.sample.y_translation.value[: old_slice.start],
                    self.nx_tomo.sample.y_translation.value[new_slice],
                    self.nx_tomo.sample.y_translation.value[old_slice.stop :],
                ]
            )
        if result_nx_tomo.sample.z_translation.value is not None:
            result_nx_tomo.sample.z_translation.value = numpy.concatenate(
                [
                    self.nx_tomo.sample.z_translation.value[: old_slice.start],
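                    # the new_slice window below replaces the whole old_slice region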
                    self.nx_tomo.sample.z_translation.value[new_slice],
                    self.nx_tomo.sample.z_translation.value[old_slice.stop :],
                ]
            )
        if result_nx_tomo.instrument.detector.image_key_control is not None:
            result_nx_tomo.instrument.detector.image_key_control = numpy.concatenate(
                [
                    self.nx_tomo.instrument.detector.image_key_control[
                        : old_slice.start
                    ],
                    self.nx_tomo.instrument.detector.image_key_control[new_slice],
                    self.nx_tomo.instrument.detector.image_key_control[
                        old_slice.stop :
                    ],
                ]
            )
        # handles the detector.data dataset. This one is special because it can contain
        # numpy arrays (raw data), h5py.VirtualSource or DataUrl (or be None)
        det_data = self.nx_tomo.instrument.detector.data
        if det_data is None:
            pass
        elif isinstance(det_data, numpy.ndarray):
            result_nx_tomo.instrument.detector.data = numpy.concatenate(
                [
                    det_data[: old_slice.start],
                    det_data[new_slice],
                    det_data[old_slice.stop :],
                ]
            )
        elif isinstance(det_data, (tuple, list)):
            result_nx_tomo.instrument.detector.data = numpy.concatenate(
                [
                    self._get_detector_data_sub_section(slice(0, old_slice.start, 1)),
                    self._get_detector_data_sub_section(new_slice),
                    self._get_detector_data_sub_section(
                        slice(old_slice.stop, n_frames + 1, 1)
                    ),
                ]
            ).tolist()
        else:
            raise TypeError(
                f"instrument.detector.data must be a numpy array or a VirtualSource or a DataUrl. Not {type(det_data)}"
            )
        return result_nx_tomo

    def _get_detector_data_sub_section(self, section: slice) -> tuple:
        """
        return a tuple of DataUrl or h5py.VirtualSource fitting the requested slice
        """
        det_data = self.nx_tomo.instrument.detector.data
        res = []
        if section.start == section.stop:
            return ()

        def get_elmt_shape(elmt: h5py.VirtualSource | DataUrl) -> tuple:
            if isinstance(elmt, h5py.VirtualSource):
                return elmt.shape
            elif isinstance(elmt, DataUrl):
                with DatasetReader(elmt) as dataset:
                    return dataset.shape
            else:
                raise TypeError(
                    f"elmt must be a DataUrl or h5py.VirtualSource. Not {type(elmt)}"
                )

        def get_elmt_nb_frame(elmt: h5py.VirtualSource | DataUrl) -> int:
            shape = get_elmt_shape(elmt)
            if len(shape) == 3:
                return shape[0]
            elif len(shape) == 2:
                return 1
            else:
                raise ValueError(f"virtualSource: {elmt} is not 2D or 3D")

        def construct_slices_elmt_list() -> list:
            """create a list of (slice, element) pairs where each element is a DataUrl or h5py.VirtualSource"""
            slices_elmts = []
            current_index = 0
            for elmt in det_data:
                n_frame = get_elmt_nb_frame(elmt)
                slice_ = slice(current_index, current_index + n_frame, 1)
                slices_elmts.append([slice_, elmt])
                current_index += n_frame
            return slices_elmts

        def intersect(slice_1, slice_2):
            """check if the two slices intersect"""
            assert isinstance(slice_1, slice) and slice_1.step == 1
            assert isinstance(slice_2, slice) and slice_2.step == 1
            return slice_1.start < slice_2.stop and slice_1.stop > slice_2.start

        def select(
            elmt: h5py.VirtualSource | DataUrl, region: slice
        ) -> h5py.VirtualSource | DataUrl:
            """select a region on the elmt.
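
            For instance (hypothetical numbers), for an element holding 10
            frames a requested region of slice(5, 25, 1) is clamped to
            slice(5, 10, 1).
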
            Can return at most the elmt itself or a region of it"""
            elmt_n_frame = get_elmt_nb_frame(elmt)
            assert elmt_n_frame != 0
            clamp_region = slice(
                max(0, region.start),
                min(elmt_n_frame, region.stop),
                1,
            )
            assert clamp_region.start != clamp_region.stop
            if isinstance(elmt, h5py.VirtualSource):
                frame_dims = elmt.shape[-2], elmt.shape[-1]
                n_frames = clamp_region.stop - clamp_region.start
                assert n_frames > 0
                shape = (n_frames, frame_dims[0], frame_dims[1])
                vs = h5py.VirtualSource(
                    path_or_dataset=elmt.path,
                    name=elmt.name,
                    shape=shape,
                )
                vs.sel = selection.select(elmt.shape, clamp_region)
                return vs
            else:
                if elmt.data_slice() is None:
                    data_slice = clamp_region
                elif isinstance(elmt.data_slice(), slice):
                    if elmt.data_slice().step not in (1, None):
                        raise ValueError("DataUrl with step != 1 are not handled")
                    else:
                        data_slice = slice(
                            elmt.data_slice().start + clamp_region.start,
                            elmt.data_slice().start + clamp_region.stop,
                            1,
                        )
                else:
                    raise TypeError(
                        f"data_slice is expected to be None or a slice. Not {type(elmt.data_slice())}"
                    )
                return DataUrl(
                    file_path=elmt.file_path(),
                    data_path=elmt.data_path(),
                    scheme=elmt.scheme(),
                    data_slice=data_slice,
                )

        for slice_raw_data, elmt in construct_slices_elmt_list():
            if intersect(section, slice_raw_data):
                res.append(
                    select(
                        elmt,
                        slice(
                            section.start - slice_raw_data.start,
                            section.stop - slice_raw_data.start,
                            1,
                        ),
                    )
                )
        return tuple(res)

    def get_invalid_datasets(self) -> dict:
        """
        return a dict of datasets that are invalid compared to the
        instrument.detector.data dataset.

        Keys are the paths to the invalid datasets; values are the reasons
        for the failure.
        """
        invalid_datasets = {}
        n_frames = self._get_n_frames()
        # check rotation_angle
        if self.nx_tomo.sample.rotation_angle is not None:
            n_rotation_angles = len(self.nx_tomo.sample.rotation_angle)
            if n_rotation_angles != n_frames:
                invalid_datasets["sample/rotation_angle"] = (
                    f"{n_rotation_angles} angles found when {n_frames} expected"
                )
        # check image_key_control (forced to have the same number as image_key already so only check one)
        if self.nx_tomo.instrument.detector.image_key_control is not None:
            n_image_key_control = len(
                self.nx_tomo.instrument.detector.image_key_control
            )
            if n_image_key_control != n_frames:
                invalid_datasets["instrument/detector/image_key_control"] = (
                    f"{n_image_key_control} image_key_control values found when {n_frames} expected"
                )
        # check x_translation
        if self.nx_tomo.sample.x_translation.value is not None:
            n_x_translation = len(self.nx_tomo.sample.x_translation.value)
            if n_x_translation != n_frames:
                invalid_datasets["sample/x_translation"] = (
                    f"{n_x_translation} x translations found when {n_frames} expected"
                )
        # check y_translation
        if self.nx_tomo.sample.y_translation.value is not None:
            n_y_translation = len(self.nx_tomo.sample.y_translation.value)
            if n_y_translation != n_frames:
                invalid_datasets["sample/y_translation"] = (
                    f"{n_y_translation} y translations found when {n_frames} expected"
                )
        # check z_translation
        if self.nx_tomo.sample.z_translation.value is not None:
            n_z_translation = len(self.nx_tomo.sample.z_translation.value)
            if n_z_translation != n_frames:
                invalid_datasets["sample/z_translation"] = (
                    f"{n_z_translation} z translations found when {n_frames} expected"
                )
        return invalid_datasets

    def _get_n_frames(self) -> int | None:
        dataset = self.nx_tomo.instrument.detector.data
        if dataset is None:
            return None
        elif isinstance(dataset, numpy.ndarray):
            if not dataset.ndim == 3:
                raise ValueError(
                    f"nx_tomo.instrument.detector.data is expected to be 3D and not {dataset.ndim}D."
                )
            else:
                return dataset.shape[0]
        elif isinstance(dataset, (list, tuple)):
            n_frames = 0
            for dataset_elmt in dataset:
                if isinstance(dataset_elmt, h5py.VirtualSource):
                    shape = dataset_elmt.shape
                    if len(shape) == 3:
                        n_frames += dataset_elmt.shape[0]
                    elif len(shape) == 2:
                        n_frames += 1
                    else:
                        raise ValueError(
                            f"h5py.VirtualSource shape is expected to be 2D (single frame) or 3D. Not {len(shape)}D."
                        )
                elif isinstance(dataset_elmt, DataUrl):
                    data = get_data(dataset_elmt)
                    if not isinstance(data, numpy.ndarray):
                        raise TypeError(
                            f"url: {dataset_elmt.path()} is not pointing to an array"
                        )
                    elif data.ndim == 2:
                        n_frames += 1
                    elif data.ndim == 3:
                        n_frames += data.shape[0]
                    else:
                        raise ValueError(
                            f"url: {dataset_elmt.path()} is expected to be 2D or 3D. Not {data.ndim}D"
                        )
                else:
                    raise TypeError(
                        f"elements of {type(dataset)} must be h5py.VirtualSource or silx.io.url.DataUrl and not {type(dataset_elmt)}"
                    )
            return n_frames
        else:
            raise TypeError(
                f"nx_tomo.instrument.detector.data type ({type(dataset)}) is not handled"
            )


# ===== nxtomo-1.3.0.dev9/nxtomo/utils/frameappender.py =====

"""
module to append frames to an hdf5 dataset (that can be virtual)
"""

from __future__ import annotations

import os

import h5py
import h5py._hl.selections as selection
import numpy
from h5py import h5s as h5py_h5s
from silx.io.url import DataUrl
from silx.io.utils import get_data, h5py_read_dataset
from silx.io.utils import open as hdf5_open

from nxtomo.io import (
    cwd_context,
    HDF5File,
    to_target_rel_path,
    from_data_url_to_virtual_source,
)
from nxtomo.utils.io import DatasetReader

__all__ = [
    "FrameAppender",
]


class FrameAppender:
    def __init__(
        self,
        data: numpy.ndarray | DataUrl,
        file_path: str,
        data_path: str,
        where: str,
        logger=None,
    ):
        """
        Class to insert 2D frame(s) into an existing dataset

        :param data: data to append
        :param file_path: file path of the HDF5 dataset to extend
        :param data_path: data_path of the HDF5 dataset to extend
        :param where: can be 'start' or 'end', to know if the frames should be appended at the beginning or at the end
        :param logger: optional logger to handle logs
        """
        if where not in ("start", "end"):
            raise ValueError("`where` should be `start` or `end`")
        if not isinstance(
            data, (DataUrl, numpy.ndarray, list, tuple, h5py.VirtualSource)
        ):
            raise TypeError(
                f"data should be an instance of DataUrl or a numpy array not {type(data)}"
            )
        self.data = data
        self.file_path = os.path.abspath(file_path)
        self.data_path = data_path
        self.where = where
        self.logger = logger

    def process(self) -> None:
        """
        main function. Will start the insertion of frame(s)
        """
        with HDF5File(self.file_path, mode="a") as h5s:
            if self.data_path in h5s:
                self._add_to_existing_dataset(h5s)
            else:
                self._create_new_dataset(h5s)
            if self.logger:
                self.logger.info(f"data added to {self.data_path}@{self.file_path}")

    def _add_to_existing_virtual_dataset(self, h5s):
        if (
            h5py.version.hdf5_version_tuple[0] <= 1
            and h5py.version.hdf5_version_tuple[1] < 12
        ):
            if self.logger:
                self.logger.warning(
                    "You are working on a virtual dataset "
                    "with a hdf5 version < 12. Frames "
                    "you want to change might be "
                    "modified depending on the working "
                    "directory without notifying. "
"See https://github.com/silx-kit/silx/issues/3277" ) if isinstance(self.data, h5py.VirtualSource): self.__insert_virtual_source_in_vds(h5s=h5s, new_virtual_source=self.data) elif isinstance(self.data, DataUrl): if self.logger is not None: self.logger.debug( f"Update virtual dataset: {self.data_path}@{self.file_path}" ) # store DataUrl in the current virtual dataset url = self.data def check_dataset(dataset_frm_url): data_need_reshape = False """check if the dataset is valid or might need a reshape""" if dataset_frm_url.ndim not in (2, 3): raise ValueError(f"{url.path()} should point to 2D or 3D dataset ") if dataset_frm_url.ndim == 2: new_shape = 1, dataset_frm_url.shape[0], dataset_frm_url.shape[1] if self.logger is not None: self.logger.info( f"reshape provided data to 3D (from {dataset_frm_url.shape} to {new_shape})" ) data_need_reshape = True return data_need_reshape loaded_dataset = None if url.data_slice() is None: # case we can avoid to load the data in memory with DatasetReader(url) as data_frm_url: data_need_reshape = check_dataset(data_frm_url) else: data_frm_url = get_data(url) data_need_reshape = check_dataset(data_frm_url) loaded_dataset = data_frm_url if url.data_slice() is None and not data_need_reshape: # case we can avoid to load the data in memory with DatasetReader(self.data) as data_frm_url: self.__insert_url_in_vds(h5s, url, data_frm_url) else: if loaded_dataset is None: data_frm_url = get_data(url) else: data_frm_url = loaded_dataset self.__insert_url_in_vds(h5s, url, data_frm_url) else: raise TypeError( "Provided data is a numpy array when given" "dataset path is a virtual dataset. " "You must store the data somewhere else " "and provide a DataUrl" ) def __insert_url_in_vds(self, h5s, url, data_frm_url): if data_frm_url.ndim == 2: dim_2, dim_1 = data_frm_url.shape data_frm_url = data_frm_url.reshape(1, dim_2, dim_1) elif data_frm_url.ndim == 3: _, dim_2, dim_1 = data_frm_url.shape else: raise ValueError("data to had is expected to be 2 or 3 d") new_virtual_source = h5py.VirtualSource( path_or_dataset=url.file_path(), name=url.data_path(), shape=data_frm_url.shape, ) if url.data_slice() is not None: # in the case we have to process to a FancySelection with hdf5_open(os.path.abspath(url.file_path())) as h5sd: dst = h5sd[url.data_path()] sel = selection.select( h5sd[url.data_path()].shape, url.data_slice(), dst ) new_virtual_source.sel = sel self.__insert_virtual_source_in_vds( h5s=h5s, new_virtual_source=new_virtual_source, relative_path=True ) def __insert_virtual_source_in_vds( self, h5s, new_virtual_source: h5py.VirtualSource, relative_path=True ): if not isinstance(new_virtual_source, h5py.VirtualSource): raise TypeError( f"{new_virtual_source} is expected to be an instance of h5py.VirtualSource and not {type(new_virtual_source)}" ) if not len(new_virtual_source.shape) == 3: raise ValueError( f"virtual source shape is expected to be 3D and not {len(new_virtual_source.shape)}D." 
            )
        # preprocess the virtual source to ensure having a relative path
        if relative_path:
            vds_file_path = to_target_rel_path(new_virtual_source.path, self.file_path)
            new_virtual_source_sel = new_virtual_source.sel
            new_virtual_source = h5py.VirtualSource(
                path_or_dataset=vds_file_path,
                name=new_virtual_source.name,
                shape=new_virtual_source.shape,
                dtype=new_virtual_source.dtype,
            )
            new_virtual_source.sel = new_virtual_source_sel

        virtual_sources_len = []
        virtual_sources = []
        # we need to recreate the VirtualSource(s): they are not
        # stored or available from the API
        for vs_info in h5s[self.data_path].virtual_sources():
            length, vs = self._recreate_vs(vs_info=vs_info, vds_file=self.file_path)
            virtual_sources.append(vs)
            virtual_sources_len.append(length)
        n_frames = h5s[self.data_path].shape[0] + new_virtual_source.shape[0]
        data_type = h5s[self.data_path].dtype

        if self.where == "start":
            virtual_sources.insert(0, new_virtual_source)
            virtual_sources_len.insert(0, new_virtual_source.shape[0])
        else:
            virtual_sources.append(new_virtual_source)
            virtual_sources_len.append(new_virtual_source.shape[0])

        # create the new virtual dataset
        layout = h5py.VirtualLayout(
            shape=(
                n_frames,
                new_virtual_source.shape[-2],
                new_virtual_source.shape[-1],
            ),
            dtype=data_type,
        )
        last = 0
        for v_source, vs_len in zip(virtual_sources, virtual_sources_len):
            layout[last : vs_len + last] = v_source
            last += vs_len
        if self.data_path in h5s:
            del h5s[self.data_path]
        h5s.create_virtual_dataset(self.data_path, layout)

    def _add_to_existing_none_virtual_dataset(self, h5s):
        """
        for now, when we want to add data *to a non-virtual dataset* we always
        duplicate the data if it is provided from a DataUrl.
        We could create a virtual dataset as well but this seems too
        complicated for a use case that we don't really have at the moment.

        :param h5s:
        """
        if self.logger is not None:
            self.logger.debug(f"Update dataset: {self.data_path}@{self.file_path}")
        if isinstance(self.data, (numpy.ndarray, list, tuple)):
            new_data = self.data
        else:
            url = self.data
            new_data = get_data(url)
            if new_data.ndim == 2:
                new_data = new_data.reshape(1, new_data.shape[0], new_data.shape[1])
        if isinstance(new_data, numpy.ndarray):
            if not new_data.shape[1:] == h5s[self.data_path].shape[1:]:
                raise ValueError(
                    f"Data shapes are incoherent: {new_data.shape} vs {h5s[self.data_path].shape}"
                )
            new_shape = (
                new_data.shape[0] + h5s[self.data_path].shape[0],
                new_data.shape[1],
                new_data.shape[2],
            )
            data_to_store = numpy.empty(new_shape)
            if self.where == "start":
                data_to_store[: new_data.shape[0]] = new_data
                data_to_store[new_data.shape[0] :] = h5py_read_dataset(
                    h5s[self.data_path]
                )
            else:
                data_to_store[: h5s[self.data_path].shape[0]] = h5py_read_dataset(
                    h5s[self.data_path]
                )
                data_to_store[h5s[self.data_path].shape[0] :] = new_data
        else:
            assert isinstance(
                self.data, (list, tuple)
            ), f"Unmanaged data type {type(self.data)}"
            o_data = h5s[self.data_path]
            o_data = list(h5py_read_dataset(o_data))
            if self.where == "start":
                new_data.extend(o_data)
                data_to_store = numpy.asarray(new_data)
            else:
                o_data.extend(new_data)
                data_to_store = numpy.asarray(o_data)

        del h5s[self.data_path]
        h5s[self.data_path] = data_to_store

    def _add_to_existing_dataset(self, h5s):
        """Add the frame to an existing dataset"""
        if h5s[self.data_path].is_virtual:
            self._add_to_existing_virtual_dataset(h5s=h5s)
        else:
            self._add_to_existing_none_virtual_dataset(h5s=h5s)

    def _create_new_dataset(self, h5s):
        """
        needs to create a new dataset.
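
        For instance (hypothetical call), appending a DataUrl to a
        ``data_path`` that does not exist yet results in a single-source
        virtual dataset pointing to that URL.
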
        In this case the policy is:

        - if a DataUrl is provided then we create a virtual dataset
        - if a numpy array is provided then we create a 'standard' dataset
        """
        if isinstance(self.data, DataUrl):
            url = self.data
            url_file_path = to_target_rel_path(url.file_path(), self.file_path)
            url = DataUrl(
                file_path=url_file_path,
                data_path=url.data_path(),
                scheme=url.scheme(),
                data_slice=url.data_slice(),
            )
            with cwd_context(os.path.dirname(self.file_path)):
                vs, vs_shape, data_type = from_data_url_to_virtual_source(
                    url, target_path=self.file_path
                )
                layout = h5py.VirtualLayout(shape=vs_shape, dtype=data_type)
                layout[:] = vs
                h5s.create_virtual_dataset(self.data_path, layout)
        elif isinstance(self.data, h5py.VirtualSource):
            virtual_source = self.data
            layout = h5py.VirtualLayout(
                shape=virtual_source.shape,
                dtype=virtual_source.dtype,
            )
            vds_file_path = to_target_rel_path(virtual_source.path, self.file_path)
            virtual_source_rel_path = h5py.VirtualSource(
                path_or_dataset=vds_file_path,
                name=virtual_source.name,
                shape=virtual_source.shape,
                dtype=virtual_source.dtype,
            )
            virtual_source_rel_path.sel = virtual_source.sel
            layout[:] = virtual_source_rel_path  # convert path to relative
            h5s.create_virtual_dataset(self.data_path, layout)
        elif not isinstance(self.data, numpy.ndarray):
            raise TypeError(
                f"self.data should be an instance of DataUrl, a numpy array or a VirtualSource. Not {type(self.data)}"
            )
        else:
            h5s[self.data_path] = self.data

    @staticmethod
    def _recreate_vs(vs_info, vds_file):
        """Simple util to retrieve a h5py.VirtualSource from virtual source
        information.

        To understand this function clearly you might first have a look at the
        use case exposed in issue:
        https://gitlab.esrf.fr/tomotools/nxtomomill/-/issues/40
        """
        with cwd_context(os.path.dirname(vds_file)):
            dataset_file_path = vs_info.file_name
            # in case the virtual source is in the same file
            if dataset_file_path == ".":
                dataset_file_path = vds_file
            with hdf5_open(dataset_file_path) as vs_node:
                dataset = vs_node[vs_info.dset_name]
                select_bounds = vs_info.vspace.get_select_bounds()
                left_bound = select_bounds[0]
                right_bound = select_bounds[1]
                length = right_bound[0] - left_bound[0] + 1
                # warning: for now step is not managed with virtual
                # dataset
                virtual_source = h5py.VirtualSource(
                    vs_info.file_name,
                    vs_info.dset_name,
                    shape=dataset.shape,
                )
                # here we could provide the dataset but we won't, to
                # ensure the file path will be relative.
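                # the source dataspace describes which region of the source
                # dataset feeds this virtual source; rebuild that selection below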
                type_code = vs_info.src_space.get_select_type()
                # check for unlimited selections in case the selection is a regular
                # hyperslab, which is the only allowed case for h5s.UNLIMITED to be
                # in the selection
                if (
                    type_code == h5py_h5s.SEL_HYPERSLABS
                    and vs_info.src_space.is_regular_hyperslab()
                ):
                    (
                        source_start,
                        stride,
                        count,
                        block,
                    ) = vs_info.src_space.get_regular_hyperslab()
                    source_end = source_start[0] + length
                    sel = selection.select(
                        dataset.shape,
                        slice(source_start[0], source_end),
                        dataset=dataset,
                    )
                    virtual_source.sel = sel
                return (
                    length,
                    virtual_source,
                )


# ===== nxtomo-1.3.0.dev9/nxtomo/utils/io.py =====

"""io utils"""

import contextlib
import h5py
import logging
import traceback
import functools
from contextlib import contextmanager

try:
    import hdf5plugin  # noqa F401
except ImportError:
    pass
from silx.io.url import DataUrl
from silx.io.utils import open as hdf5_open

__all__ = ["EntryReader", "DatasetReader", "deprecated_warning", "deprecated"]


class _BaseReader(contextlib.AbstractContextManager):
    def __init__(self, url: DataUrl):
        if not isinstance(url, DataUrl):
            raise TypeError(f"url should be an instance of DataUrl. Not {type(url)}")
        if url.scheme() not in ("silx", "h5py"):
            raise ValueError("Valid schemes are silx and h5py")
        if url.data_slice() is not None:
            raise ValueError(
                "Data slices are not managed. Data path should "
                "point to a bliss node (h5py.Group)"
            )
        self._url = url
        self._file_handler = None

    def __exit__(self, *exc):
        return self._file_handler.close()


class EntryReader(_BaseReader):
    """Context manager used to read a bliss node"""

    def __enter__(self):
        self._file_handler = hdf5_open(filename=self._url.file_path())
        if self._url.data_path() == "":
            entry = self._file_handler
        elif self._url.data_path() not in self._file_handler:
            raise KeyError(
                f"data path '{self._url.data_path()}' doesn't exist in '{self._url.file_path()}'"
            )
        else:
            entry = self._file_handler[self._url.data_path()]
        if not isinstance(entry, h5py.Group):
            raise ValueError("Data path should point to a bliss node (h5py.Group)")
        return entry


class DatasetReader(_BaseReader):
    """Context manager used to read a dataset"""

    def __enter__(self):
        self._file_handler = hdf5_open(filename=self._url.file_path())
        entry = self._file_handler[self._url.data_path()]
        if not isinstance(entry, h5py.Dataset):
            raise ValueError(
                f"Data path ({self._url.path()}) should point to a dataset (h5py.Dataset)"
            )
        return entry


depreclog = logging.getLogger("nxtomo.DEPRECATION")

deprecache = set([])


def deprecated_warning(
    type_,
    name,
    reason=None,
    replacement=None,
    since_version=None,
    only_once=True,
    skip_backtrace_count=0,
):
    """
    Function to log a deprecation warning

    :param type_: Nature of the object to be deprecated: "Module", "Function", "Class" ...
    :param name: Object name.
    :param reason: Reason for deprecating this function (e.g. "feature no longer provided")
    :param replacement: Name of replacement function (if the reason for deprecating was to rename the function)
    :param since_version: First *silx* version for which the function was deprecated (e.g. "0.5.0").
    :param only_once: If true, the deprecation warning will only be generated one time for each different call location. Default is true.
    :param skip_backtrace_count: Amount of last backtrace to ignore when logging the backtrace
    """
    if not depreclog.isEnabledFor(logging.WARNING):
        # Avoid computation when it is not logged
        return

    msg = "%s %s is deprecated"
    if since_version is not None:
        msg += " since silx version %s" % since_version
    msg += "."
    if reason is not None:
        msg += " Reason: %s." % reason
    if replacement is not None:
        msg += " Use '%s' instead." % replacement
    msg += "\n%s"
    limit = 2 + skip_backtrace_count
    backtrace = "".join(traceback.format_stack(limit=limit)[0])
    backtrace = backtrace.rstrip()
    if only_once:
        data = (msg, type_, name, backtrace)
        if data in deprecache:
            return
        else:
            deprecache.add(data)
    depreclog.warning(msg, type_, name, backtrace)


def deprecated(
    func=None,
    reason=None,
    replacement=None,
    since_version=None,
    only_once=True,
    skip_backtrace_count=1,
):
    """
    Decorator that deprecates the use of a function

    :param str reason: Reason for deprecating this function (e.g. "feature no longer provided")
    :param str replacement: Name of replacement function (if the reason for deprecating was to rename the function)
    :param str since_version: First *silx* version for which the function was deprecated (e.g. "0.5.0").
    :param bool only_once: If true, the deprecation warning will only be generated one time. Default is true.
    :param int skip_backtrace_count: Amount of last backtrace to ignore when logging the backtrace
    """

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            deprecated_warning(
                type_="Function",
                name=func.__name__,
                reason=reason,
                replacement=replacement,
                since_version=since_version,
                only_once=only_once,
                skip_backtrace_count=skip_backtrace_count,
            )
            return func(*args, **kwargs)

        return wrapper

    if func is not None:
        return decorator(func)
    return decorator


@contextmanager
def ignore_deprecation_warning():
    """filter logs from 'nxtomo.DEPRECATION'"""

    def filter(record):
        return record.name != depreclog.name

    depreclog.addFilter(filter)
    yield
    depreclog.removeFilter(filter)


# ===== nxtomo-1.3.0.dev9/nxtomo/utils/tests/test_detectorsplitter.py =====

import os
from tempfile import TemporaryDirectory

import h5py
import numpy
import pytest
from silx.io.url import DataUrl

from nxtomo.application.nxtomo import NXtomo
from nxtomo.utils.detectorsplitter import NXtomoDetectorDataSplitter


def test_get_invalid_datasets():
    """test the NXtomoDetectorDataSplitter get_invalid_datasets function"""
    nx_tomo = NXtomo("test")
    n_frames = 10
    nx_tomo.instrument.detector.data = numpy.random.random(100 * 100 * 10).reshape(
        [n_frames, 100, 100]
    )
    splitter = NXtomoDetectorDataSplitter(nx_tomo=nx_tomo)
    assert len(splitter.get_invalid_datasets()) == 0
    # test rotation angle
    nx_tomo.sample.rotation_angle = [12, 13]
    assert len(splitter.get_invalid_datasets()) == 1
    nx_tomo.sample.rotation_angle = [0] * n_frames
    assert len(splitter.get_invalid_datasets()) == 0
    # test image_key_control
    nx_tomo.instrument.detector.image_key_control = [0]
    assert len(splitter.get_invalid_datasets()) == 1
    nx_tomo.instrument.detector.image_key_control = [0] * n_frames
    assert len(splitter.get_invalid_datasets()) == 0
    # test x_translation
    nx_tomo.sample.x_translation = [0]
    assert len(splitter.get_invalid_datasets()) == 1
    nx_tomo.sample.x_translation = [0] * n_frames
    assert len(splitter.get_invalid_datasets()) == 0
    # test y_translation
    nx_tomo.sample.y_translation = [0]
    assert len(splitter.get_invalid_datasets()) == 1
    nx_tomo.sample.y_translation = [0] * n_frames
    assert len(splitter.get_invalid_datasets()) == 0
    # test z_translation
    nx_tomo.sample.z_translation = [0]
    assert len(splitter.get_invalid_datasets()) == 1
    nx_tomo.sample.z_translation = [0] * n_frames
    assert len(splitter.get_invalid_datasets()) == 0


def test_spliter_raw_data():
    """test the splitter on a simple non-virtual h5py Dataset"""
    nx_tomo = NXtomo("test_raw_data")
    n_frames = 20
    nx_tomo.instrument.detector.data = numpy.random.random(
        100 * 100 * n_frames
    ).reshape([n_frames, 100, 100])
    nx_tomo.sample.rotation_angle = [0, 12]
    splitter = NXtomoDetectorDataSplitter(nx_tomo=nx_tomo)
    # check incoherent number of rotations
    with pytest.raises(ValueError):
        splitter.split(data_slice=slice(0, 100, 1), nb_part=2)
    nx_tomo.sample.rotation_angle = numpy.linspace(
        0, 180, num=n_frames, endpoint=False
    )
    # check slice nb_part < 0
    with pytest.raises(ValueError):
        splitter.split(data_slice=slice(0, 100, 1), nb_part=-1)
    # check slice step != 1
    with pytest.raises(ValueError):
        splitter.split(data_slice=slice(0, 100, 2), nb_part=2)
    # check incoherent number of frames
    with pytest.raises(ValueError):
        splitter.split(data_slice=slice(0, 99, 2), nb_part=2)
    # check x translation
    nx_tomo.sample.x_translation = [0, 12]
    with pytest.raises(ValueError):
        splitter.split(data_slice=slice(0, 100, 1), nb_part=2)
    nx_tomo.sample.x_translation = numpy.random.random(n_frames)
    nx_tomo.sample.y_translation = numpy.random.random(n_frames)
    nx_tomo.sample.z_translation = numpy.random.random(n_frames)
    # check image key
    nx_tomo.instrument.detector.image_key_control = [0, 2]
    with pytest.raises(ValueError):
        splitter.split(data_slice=slice(0, 100, 1), nb_part=2)
    nx_tomo.instrument.detector.image_key_control = [
        numpy.random.randint(low=-1, high=2) for i in range(n_frames)
    ]

    assert splitter.split(data_slice=slice(0, 100, 1), nb_part=1) == [
        nx_tomo,
    ]
    # check error if requested to split a region bigger than the existing one (100 vs n_frames)
    with pytest.raises(ValueError):
        splitted_nx_tomo = splitter.split(data_slice=slice(0, 100, 1), nb_part=2)

    splitted_nx_tomo = splitter.split(data_slice=slice(0, 20, 1), nb_part=2)
    assert len(splitted_nx_tomo) == 2
    s_nx_tomo_1, s_nx_tomo_2 = splitted_nx_tomo
    # check rotation_angle
    numpy.testing.assert_array_equal(
        s_nx_tomo_1.sample.rotation_angle,
        nx_tomo.sample.rotation_angle[0 : n_frames // 2],
    )
    numpy.testing.assert_array_equal(
        s_nx_tomo_2.sample.rotation_angle,
        nx_tomo.sample.rotation_angle[n_frames // 2 :],
    )
    # check image_key_control
    numpy.testing.assert_array_equal(
        s_nx_tomo_1.instrument.detector.image_key_control,
        nx_tomo.instrument.detector.image_key_control[0 : n_frames // 2],
    )
    numpy.testing.assert_array_equal(
        s_nx_tomo_2.instrument.detector.image_key_control,
        nx_tomo.instrument.detector.image_key_control[n_frames // 2 :],
    )
    # check x translation
    numpy.testing.assert_array_equal(
        s_nx_tomo_1.sample.x_translation.value,
        nx_tomo.sample.x_translation.value[0 : n_frames // 2],
    )
    numpy.testing.assert_array_equal(
        s_nx_tomo_2.sample.x_translation.value,
        nx_tomo.sample.x_translation.value[n_frames // 2 :],
    )
    # check y translation
    numpy.testing.assert_array_equal(
        s_nx_tomo_1.sample.y_translation.value,
        nx_tomo.sample.y_translation.value[0 : n_frames // 2],
    )
    numpy.testing.assert_array_equal(
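        # the second half of the y translations must land in the second NXtomo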
        s_nx_tomo_2.sample.y_translation.value,
        nx_tomo.sample.y_translation.value[n_frames // 2 :],
    )
    # check z translation
    numpy.testing.assert_array_equal(
        s_nx_tomo_1.sample.z_translation.value,
        nx_tomo.sample.z_translation.value[0 : n_frames // 2],
    )
    numpy.testing.assert_array_equal(
        s_nx_tomo_2.sample.z_translation.value,
        nx_tomo.sample.z_translation.value[n_frames // 2 :],
    )
    # check detector data
    numpy.testing.assert_array_equal(
        s_nx_tomo_1.instrument.detector.data,
        nx_tomo.instrument.detector.data[0 : n_frames // 2],
    )
    numpy.testing.assert_array_equal(
        s_nx_tomo_2.instrument.detector.data,
        nx_tomo.instrument.detector.data[n_frames // 2 :],
    )


def test_spliter_virtual_sources_1():
    """
    test the splitter on a simulated h5py virtual dataset composed of two
    virtual sources. Each of the two split NXtomo must end up with its own
    virtual source.

    rotation_angle, [x|y|z]_translation and image_key datasets are always
    handled as numpy arrays not pointing to any external resources. This is
    only the case for detector.data, so this is the only dataset to test here
    """
    nx_tomo = NXtomo("test_raw_data")
    nx_tomo.instrument.detector.data = [
        h5py.VirtualSource("path_to_dataset_1", name="dataset_1", shape=[10, 100, 100]),
        h5py.VirtualSource("path_to_dataset_2", name="dataset_2", shape=[10, 100, 100]),
    ]
    splitter = NXtomoDetectorDataSplitter(nx_tomo=nx_tomo)
    splitted_nx_tomo = splitter.split(data_slice=slice(0, 20, 1), nb_part=2)
    assert len(splitted_nx_tomo) == 2
    s_nx_tomo_1, s_nx_tomo_2 = splitted_nx_tomo

    det_dataset_1 = s_nx_tomo_1.instrument.detector.data
    det_dataset_2 = s_nx_tomo_2.instrument.detector.data
    assert len(det_dataset_1) == 1
    assert len(det_dataset_2) == 1
    det_dataset_vs1 = det_dataset_1[0]
    det_dataset_vs2 = det_dataset_2[0]
    assert isinstance(det_dataset_vs1, h5py.VirtualSource)
    assert det_dataset_vs1.path == "path_to_dataset_1"
    assert det_dataset_vs1.shape == (10, 100, 100)
    assert isinstance(det_dataset_vs2, h5py.VirtualSource)
    assert det_dataset_vs2.path == "path_to_dataset_2"
    assert det_dataset_vs2.shape == (10, 100, 100)


def test_spliter_virtual_sources_2():
    """
    test the splitter on a h5py virtual dataset composed of a single virtual
    source. Must split this one into two VirtualSource.

    rotation_angle, [x|y|z]_translation and image_key datasets are always
    handled as numpy arrays not pointing to any external resources. This is
    only the case for detector.data, so this is the only dataset to test here
    """
    nx_tomo = NXtomo("test_raw_data")
    nx_tomo.instrument.detector.data = [
        h5py.VirtualSource(
            "path_to_dataset", name="path_to_dataset", shape=[20, 100, 100]
        ),
    ]
    splitter = NXtomoDetectorDataSplitter(nx_tomo=nx_tomo)
    splitted_nx_tomo = splitter.split(data_slice=slice(0, 20, 1), nb_part=2)
    assert len(splitted_nx_tomo) == 2
    splitted_nx_tomo = splitter.split(data_slice=slice(0, 20, 1), nb_part=4)
    assert len(splitted_nx_tomo) == 4


def test_spliter_virtual_sources_3():
    """
    test the splitter on a concrete h5py virtual dataset

    rotation_angle, [x|y|z]_translation and image_key datasets are always
    handled as numpy arrays not pointing to any external resources.
    This is only the case for detector.data, so this is the only dataset to test here
    """
    n_file = 5
    n_frame_per_file = 20
    layout = h5py.VirtualLayout(
        shape=(n_file * n_frame_per_file, 100, 100), dtype=float
    )
    with TemporaryDirectory() as folder:
        for i_file in range(n_file):
            file_path = os.path.join(folder, f"file{i_file}.hdf5")
            data_path = f"path_to_dataset_{i_file}"
            with h5py.File(file_path, mode="w") as h5f:
                if i_file == 0:
                    data = numpy.ones([n_frame_per_file, 100, 100])
                elif i_file == n_file - 1:
                    data = numpy.ones([n_frame_per_file, 100, 100]) * 2
                else:
                    start = i_file * 1000.0
                    stop = i_file * 1000.0 + (n_frame_per_file * 100 * 100)
                    data = numpy.arange(start, stop).reshape(n_frame_per_file, 100, 100)
                h5f[data_path] = data
                vs = h5py.VirtualSource(h5f[data_path])
                layout[i_file * n_frame_per_file : (i_file + 1) * n_frame_per_file] = vs

        master_file = os.path.join(folder, "master_file.hdf5")
        with h5py.File(master_file, mode="w") as h5f:
            h5f.create_virtual_dataset("data", layout)
            original_data = h5f["data"][()]

        nx_tomo = NXtomo("entry0000")
        with h5py.File(master_file, mode="r") as h5f:
            vs_ = []
            for vs_info in h5f["data"].virtual_sources():
                vs_.append(
                    h5py.VirtualSource(
                        vs_info.file_name,
                        vs_info.dset_name,
                        shape=(n_frame_per_file, 100, 100),
                    )
                )
            nx_tomo.instrument.detector.data = vs_

        splitter = NXtomoDetectorDataSplitter(nx_tomo=nx_tomo)
        data_slice = slice(10, n_frame_per_file * n_file - 10, 1)
        splitted_nx_tomo = splitter.split(data_slice=data_slice, nb_part=2)
        assert len(splitted_nx_tomo) == 2
        # check the two datasets created
        s_nx_tomo_1, s_nx_tomo_2 = splitted_nx_tomo
        output_file_1 = os.path.join(folder, "output_file_1.nx")
        # data must contain a section common to the two nxtomo: the first 10 and last 10 frames;
        # the rest must be split between the two NXtomo
        assert len(s_nx_tomo_1.instrument.detector.data) == 5
        assert s_nx_tomo_1.instrument.detector.data[0].shape[0] == 10
        assert s_nx_tomo_1.instrument.detector.data[1].shape[0] == 10
        assert s_nx_tomo_1.instrument.detector.data[2].shape[0] == 20
        assert s_nx_tomo_1.instrument.detector.data[3].shape[0] == 10
        assert s_nx_tomo_1.instrument.detector.data[4].shape[0] == 10
        s_nx_tomo_1.save(output_file_1, "entry0000")

        output_file_2 = os.path.join(folder, "output_file_2.nx")
        assert len(s_nx_tomo_2.instrument.detector.data) == 5
        assert s_nx_tomo_2.instrument.detector.data[0].shape[0] == 10
        assert s_nx_tomo_2.instrument.detector.data[1].shape[0] == 10
        assert s_nx_tomo_2.instrument.detector.data[2].shape[0] == 20
        assert s_nx_tomo_2.instrument.detector.data[3].shape[0] == 10
        assert s_nx_tomo_2.instrument.detector.data[4].shape[0] == 10
        s_nx_tomo_2.save(output_file_2, "entry0000")

        # check final datasets are correctly formed
        with h5py.File(output_file_1, mode="r") as h5f:
            nx_1_data = h5f["entry0000/instrument/detector/data"][()]
            assert nx_1_data.shape[0] == 60
        # check final datasets are correctly formed
        with h5py.File(output_file_2, mode="r") as h5f:
            nx_2_data = h5f["entry0000/instrument/detector/data"][()]
            assert nx_2_data.shape[0] == 60

        # first 10 frames (common to the three nxtomo)
        numpy.testing.assert_array_equal(
            nx_1_data[0:10],
            nx_2_data[0:10],
        )
        numpy.testing.assert_array_equal(
            nx_1_data[0:10],
            original_data[0:10],
        )
        # last 10 frames (common to the three nxtomo)
        numpy.testing.assert_array_equal(
            nx_1_data[-10:],
            nx_2_data[-10:],
        )
        numpy.testing.assert_array_equal(
            nx_1_data[-10:],
            original_data[-10:],
        )
        # test nx_1_data unique region
        numpy.testing.assert_array_equal(
            nx_1_data[10:50],
            original_data[10:50],
        )
        # test nx_2_data unique region
        numpy.testing.assert_array_equal(
            nx_2_data[10:50],
            original_data[50:90],
        )


def test_spliter_data_url():
    """
    test the splitter on a list of DataUrl

    rotation_angle, [x|y|z]_translation and image_key datasets are always
    handled as numpy arrays not pointing to any external resources. This is
    only the case for detector.data, so this is the only dataset to test here
    """
    urls = []
    n_frame_per_file = 20
    n_file = 5
    original_data = []
    with TemporaryDirectory() as folder:
        for i_file in range(n_file):
            file_path = os.path.join(folder, f"file{i_file}.hdf5")
            data_path = f"path_to_dataset_{i_file}"
            with h5py.File(file_path, mode="w") as h5f:
                if i_file == 0:
                    data = numpy.ones([n_frame_per_file, 100, 100])
                elif i_file == n_file - 1:
                    data = numpy.ones([n_frame_per_file, 100, 100]) * 2
                else:
                    start = i_file * 1000.0
                    stop = i_file * 1000.0 + (n_frame_per_file * 100 * 100)
                    data = numpy.arange(start, stop).reshape(n_frame_per_file, 100, 100)
                h5f[data_path] = data
                original_data.append(data)
            urls.append(
                DataUrl(
                    file_path=file_path,
                    data_path=data_path,
                    scheme="silx",
                )
            )
        original_data = numpy.concatenate(original_data)

        nx_tomo = NXtomo("entry0000")
        nx_tomo.instrument.detector.data = urls
        splitter = NXtomoDetectorDataSplitter(nx_tomo=nx_tomo)
        data_slice = slice(10, n_frame_per_file * n_file - 10, 1)
        splitted_nx_tomo = splitter.split(data_slice=data_slice, nb_part=2)
        assert len(splitted_nx_tomo) == 2
        # check the two datasets created
        s_nx_tomo_1, s_nx_tomo_2 = splitted_nx_tomo
        output_file_1 = os.path.join(folder, "output_file_1.nx")

        # data must contain a section common to the two nxtomo: the first 10 and last 10 frames;
        # the rest must be split between the two NXtomo
        def n_elmt(slice_):
            return slice_.stop - slice_.start

        assert len(s_nx_tomo_1.instrument.detector.data) == 5
        assert n_elmt(s_nx_tomo_1.instrument.detector.data[0].data_slice()) == 10
        assert n_elmt(s_nx_tomo_1.instrument.detector.data[1].data_slice()) == 10
        assert n_elmt(s_nx_tomo_1.instrument.detector.data[2].data_slice()) == 20
        assert n_elmt(s_nx_tomo_1.instrument.detector.data[3].data_slice()) == 10
        assert n_elmt(s_nx_tomo_1.instrument.detector.data[4].data_slice()) == 10
        s_nx_tomo_1.save(output_file_1, "entry0000")

        output_file_2 = os.path.join(folder, "output_file_2.nx")
        assert len(s_nx_tomo_2.instrument.detector.data) == 5
        assert n_elmt(s_nx_tomo_2.instrument.detector.data[0].data_slice()) == 10
        assert n_elmt(s_nx_tomo_2.instrument.detector.data[1].data_slice()) == 10
        assert n_elmt(s_nx_tomo_2.instrument.detector.data[2].data_slice()) == 20
        assert n_elmt(s_nx_tomo_2.instrument.detector.data[3].data_slice()) == 10
        assert n_elmt(s_nx_tomo_2.instrument.detector.data[4].data_slice()) == 10
        s_nx_tomo_2.save(output_file_2, "entry0000")

        # check final datasets are correctly formed
        with h5py.File(output_file_1, mode="r") as h5f:
            nx_1_data = h5f["entry0000/instrument/detector/data"][()]
            assert nx_1_data.shape[0] == 60
        # check final datasets are correctly formed
        with h5py.File(output_file_2, mode="r") as h5f:
            nx_2_data = h5f["entry0000/instrument/detector/data"][()]
            assert nx_2_data.shape[0] == 60

        # first 10 frames (common to the three nxtomo)
        numpy.testing.assert_array_equal(
            nx_1_data[0:10],
            nx_2_data[0:10],
        )
        numpy.testing.assert_array_equal(
            nx_1_data[0:10],
            original_data[0:10],
        )
        # last 10 frames (common to the three nxtomo)
        numpy.testing.assert_array_equal(
            nx_1_data[-10:],
            nx_2_data[-10:],
        )
        numpy.testing.assert_array_equal(
            nx_1_data[-10:],
            original_data[-10:],
        )
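        # the non-common middle region must be split between the two NXtomo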
# test nx_1_data unique region numpy.testing.assert_array_equal( nx_1_data[10:50], original_data[10:50], ) # test nx_2_data unique region numpy.testing.assert_array_equal( nx_2_data[10:50], original_data[50:90], ) def test_spliter_missing_projections(): """ If some projections are missing and 'nb_turn' cannot be used then 'tomo_n' will be used instead """ urls = [] n_frame_per_file = 20 n_file = 5 original_data = [] with TemporaryDirectory() as folder: for i_file in range(n_file): file_path = os.path.join(folder, f"file{i_file}.hdf5") data_path = f"path_to_dataset_{i_file}" with h5py.File(file_path, mode="w") as h5f: if i_file == 0: data = numpy.ones([n_frame_per_file, 100, 100]) elif i_file == n_file - 1: data = numpy.ones([n_frame_per_file, 100, 100]) * 2 else: start = i_file * 1000.0 stop = i_file * 1000.0 + (n_frame_per_file * 100 * 100) data = numpy.arange(start, stop).reshape(n_frame_per_file, 100, 100) h5f[data_path] = data original_data.append(data) urls.append( DataUrl( file_path=file_path, data_path=data_path, scheme="silx", ) ) original_data = numpy.concatenate(original_data) nx_tomo = NXtomo("entry0000") nx_tomo.instrument.detector.data = urls splitter = NXtomoDetectorDataSplitter(nx_tomo=nx_tomo) data_slice = slice(0, 100, 1) splitted_nx_tomo = splitter.split(data_slice=data_slice, nb_part=2) assert len(splitted_nx_tomo) == 2 splitted_nx_tomo = splitter.split( data_slice=data_slice, nb_part=None, tomo_n=20 ) assert len(splitted_nx_tomo) == 5 splitted_nx_tomo = splitter.split( data_slice=data_slice, nb_part=None, tomo_n=40 ) assert len(splitted_nx_tomo) == 3 splitted_nx_tomo = splitter.split( data_slice=data_slice, nb_part=None, tomo_n=65 ) assert len(splitted_nx_tomo) == 2 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1727757728.0 nxtomo-1.3.0.dev9/nxtomo/utils/tests/test_transformation.py0000644000175000017500000001715414676676640023336 0ustar00paynopaynoimport pytest import numpy from nxtomo.utils.transformation import ( Transformation, TransformationAxis, TransformationType, DetYFlipTransformation, DetZFlipTransformation, GravityTransformation, get_lr_flip, get_ud_flip, build_matrix, ) from nxtomo.paths.nxtransformations import NEXUS_TRANSFORMATIONS_PATH def test_Transformation(): """ test Transformation class """ transformation_translation = Transformation( axis_name="tz", value=12.2, transformation_type="translation", vector=TransformationAxis.AXIS_Z, ) # test defining units transformation_translation = Transformation( axis_name="tx", value=45, transformation_type=TransformationType.TRANSLATION, vector=(0, 1, 0), ) with pytest.raises(ValueError): transformation_translation.units = "degree" transformation_translation.units = "cm" transformation_rotation = Transformation( axis_name="rx", value=(45, 56, 89), transformation_type="rotation", vector=TransformationAxis.AXIS_X, ) with pytest.raises(ValueError): transformation_rotation.units = "cm" transformation_rotation.units = "degree" # make sure the API is frozen with pytest.raises(AttributeError): transformation_rotation.toto = "test" # test from / to dict functions transformations_nexus_paths = NEXUS_TRANSFORMATIONS_PATH assert transformation_translation == Transformation.from_nx_dict( axis_name=transformation_translation.axis_name, dict_=transformation_translation.to_nx_dict( transformations_nexus_paths=transformations_nexus_paths, data_path="", ), transformations_nexus_paths=transformations_nexus_paths, ) assert transformation_rotation ==
Transformation.from_nx_dict( axis_name=transformation_rotation.axis_name, dict_=transformation_rotation.to_nx_dict( transformations_nexus_paths=transformations_nexus_paths, data_path="", ), transformations_nexus_paths=transformations_nexus_paths, ) def test_helpers(): """simple test on some helper classes / functions""" DetYFlipTransformation(flip=True) DetZFlipTransformation(flip=True) def test_get_lr_flip(): """ test `get_lr_flip` function """ trans_as_rad = Transformation( axis_name="rad_rot", transformation_type="rotation", value=numpy.pi, vector=TransformationAxis.AXIS_Z, ) trans_as_rad.units = "rad" assert trans_as_rad == DetZFlipTransformation(flip=True) transformations = ( DetZFlipTransformation(flip=True), Transformation( axis_name="toto", transformation_type="rotation", value=-180, vector=TransformationAxis.AXIS_Z, ), Transformation( axis_name="other", transformation_type="rotation", value=70, vector=TransformationAxis.AXIS_Z, ), trans_as_rad, Transformation( axis_name="other2", transformation_type="rotation", value=180, vector=TransformationAxis.AXIS_Y, ), ) assert get_lr_flip(transformations=transformations) == ( DetZFlipTransformation(flip=True), Transformation( axis_name="toto", transformation_type="rotation", value=-180, vector=TransformationAxis.AXIS_Z, ), trans_as_rad, ) def test_get_ud_flip(): """ test `get_ud_flip` function """ transformations = ( Transformation( axis_name="other", transformation_type="rotation", value=70, vector=TransformationAxis.AXIS_Z, ), Transformation( axis_name="toto", transformation_type="rotation", value=-180, vector=TransformationAxis.AXIS_Y, ), DetYFlipTransformation(flip=True), Transformation( axis_name="other2", transformation_type="rotation", value=180, vector=TransformationAxis.AXIS_Y, ), DetZFlipTransformation(flip=True), ) assert get_ud_flip(transformations=transformations) == ( Transformation( axis_name="toto", transformation_type="rotation", value=-180, vector=TransformationAxis.AXIS_Y, ), DetYFlipTransformation(flip=True), Transformation( axis_name="other2", transformation_type="rotation", value=180, vector=TransformationAxis.AXIS_Y, ), ) def test_transformation_as_matrix(): """ test Transformation().as_matrix() function """ numpy.testing.assert_array_equal( DetYFlipTransformation(flip=True).as_matrix(), numpy.array( [ [numpy.cos(numpy.pi), 0, numpy.sin(numpy.pi)], [0, 1, 0], [-numpy.sin(numpy.pi), 0, numpy.cos(numpy.pi)], ], dtype=numpy.float32, ), ) numpy.testing.assert_array_equal( DetZFlipTransformation(flip=True).as_matrix(), numpy.array( [ [numpy.cos(numpy.pi), -numpy.sin(numpy.pi), 0], [numpy.sin(numpy.pi), numpy.cos(numpy.pi), 0], [0, 0, 1], ], dtype=numpy.float32, ), ) with pytest.raises(ValueError): Transformation( axis_name="rx", transformation_type="rotation", value=None, vector=(1, 0, 0), ).as_matrix() with pytest.raises(ValueError): Transformation( axis_name="rx", transformation_type="rotation", value=1, vector=(0, 0, 0), ).as_matrix() def test_build_matrix(): """ test `build_matrix` function """ gravity = GravityTransformation() rz = DetZFlipTransformation(flip=True, depends_on="gravity") ry = DetYFlipTransformation(flip=True, depends_on="rz") tx = Transformation( axis_name="tx", transformation_type=TransformationType.TRANSLATION, depends_on="ry", vector=TransformationAxis.AXIS_X, value=5, ) expected_result = numpy.matmul( numpy.matmul( numpy.array( [ [numpy.cos(numpy.pi), -numpy.sin(numpy.pi),
0], [numpy.sin(numpy.pi), numpy.cos(numpy.pi), 0], [0, 0, 1], ], dtype=numpy.float32, ), numpy.array( [ [numpy.cos(numpy.pi), 0, numpy.sin(numpy.pi)], [0, 1, 0], [-numpy.sin(numpy.pi), 0, numpy.cos(numpy.pi)], ], dtype=numpy.float32, ), ), numpy.array( [ [5, 0, 0], [0, 1, 0], [0, 0, 1], ], dtype=numpy.float32, ), ) numpy.testing.assert_array_almost_equal( expected_result, build_matrix([gravity, rz, ry, tx]), ) # test incoherence in the resolution chain rz2 = DetZFlipTransformation(flip=True, depends_on="unknown axis") with pytest.raises(ValueError): build_matrix([gravity, rz2, ry, tx]) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1727757728.0 nxtomo-1.3.0.dev9/nxtomo/utils/transformation.py0000644000175000017500000005537214676676640021131 0ustar00paynopayno"""module to provide helper classes to define transformations contained in NXtransformations""" from __future__ import annotations import logging import numpy from pyunitsystem.metricsystem import MetricSystem from silx.utils.enum import Enum as _Enum from nxtomo.utils.io import deprecated_warning _logger = logging.getLogger(__name__) __all__ = [ "TransformationType", "TransformationAxis", "Transformation", "DetYFlipTransformation", "UDDetTransformation", "DetZFlipTransformation", "LRDetTransformation", "GravityTransformation", "get_lr_flip", "get_ud_flip", "build_matrix", ] class TransformationType(_Enum): """ possible NXtransformations types """ TRANSLATION = "translation" ROTATION = "rotation" class TransformationAxis: """ Some predefined axes for tomography acquisitions done at the ESRF. Warning: those are stored as (X, Y, Z) and not in the usual numpy reference (Z, Y, X) space is defined here: https://tomo.gitlab-pages.esrf.fr/ebs-tomo/master/modelization.html """ AXIS_X = (1, 0, 0) AXIS_Y = (0, 1, 0) AXIS_Z = (0, 0, 1) class Transformation: """ Define a Transformation done on an axis :param axis_name: name of the transformation. :param transformation_type: type of the transformation. As the unit depends on the transformation type it is not possible to modify it once created :param vector: vector of the transformation. Expected as a tuple of three values that define the axis for this transformation. Can also be an instance of TransformationAxis predefining some default axis :param depends_on: used to determine the transformation chain. If it depends on no other transformation then it should be considered as depending on "gravity" only. :warning: when converting a rotation which has 'radian' as unit it will be cast to degree """ __isfrozen = False # to ease API and avoid setting wrong attributes we 'freeze' the attributes # see https://stackoverflow.com/questions/3603502/prevent-creating-new-attributes-outside-init def __init__( self, axis_name: str, value, transformation_type: TransformationType, vector: tuple[float, float, float] | TransformationAxis, depends_on: str | None = None, ) -> None: self._axis_name = axis_name self._transformation_values = None self.transformation_values = value self._transformation_type = TransformationType.from_value(transformation_type) self._units = ( MetricSystem.METER if self._transformation_type is TransformationType.TRANSLATION else "degree" ) if isinstance(vector, TransformationAxis): self._vector = vector.value() elif not isinstance(vector, (tuple, list, numpy.ndarray)) or len(vector) != 3: raise TypeError( f"vector should be a tuple of three elements.
{vector} provided" ) else: self._vector = tuple(vector) assert len(self._vector) == 3, "" self._offset = (0, 0, 0) self._depends_on = None self.depends_on = depends_on self._equipment_component = None self._set_freeze() def _set_freeze(self, freeze=True): self.__isfrozen = freeze @property def axis_name(self) -> str: return self._axis_name @axis_name.setter def axis_name(self, axis_name: str): self._axis_name = axis_name @property def transformation_values(self): return self._transformation_values @transformation_values.setter def transformation_values(self, values): if values is not None and not numpy.isscalar(values): self._transformation_values = numpy.array(values) else: self._transformation_values = values @property def transformation_type(self) -> TransformationType: return self._transformation_type @property def units(self): return self._units @units.setter def units(self, units: str | MetricSystem): """ :raises ValueError: if units is invalid (depends on the transformation type). """ if units == "m/s2": # corner cases of the gravity self._units = units elif self._transformation_type is TransformationType.TRANSLATION: self._units = MetricSystem.from_value(units) elif units in ("rad", "radian", "radians"): self._units = "rad" elif units in ("degree", "degrees"): self._units = "degree" else: raise ValueError(f"Unrecognized unit {units}") @property def vector(self) -> tuple[float, float, float]: return self._vector @property def offset(self) -> tuple: return self._offset @offset.setter def offset(self, offset: tuple | list | numpy.ndarray): if not isinstance(offset, (tuple, list, numpy.ndarray)): raise TypeError( f"offset is expected to be a vector of three elements. {type(offset)} provided" ) elif not len(offset) == 3: raise TypeError( f"offset is expected to be a vector of three elements. {offset} provided" ) self._offset = tuple(offset) @property def depends_on(self): return self._depends_on @depends_on.setter def depends_on(self, depends_on): """ :param depends_on: """ if not (depends_on is None or isinstance(depends_on, str)): raise TypeError( f"offset is expected to be None or str. {type(depends_on)} provided" ) self._depends_on = depends_on @property def equipment_component(self) -> str | None: return self._equipment_component @equipment_component.setter def equipment_component(self, equipment_component: str | None): if not (equipment_component is None or isinstance(equipment_component, str)): raise TypeError( f"equipment_component is expect to ne None or a str. 
{type(equipment_component)} provided" ) self._equipment_component = equipment_component def get_transformation_values_in_common_unit(self): transformation_values = self.transformation_values units = self.units if units in ("radian", "rad", "rads", "radians"): if transformation_values is None: return None, "degree" else: transformation_values = numpy.rad2deg(transformation_values) return transformation_values % 360, "degree" elif units in ("degree", "degrees"): if transformation_values is None: return None, "degree" else: return transformation_values % 360, "degree" elif units == "m/s2": return transformation_values, "m/s2" else: converted_values = ( transformation_values * MetricSystem.from_str(str(units)).value ) return converted_values, MetricSystem.METER def to_nx_dict(self, transformations_nexus_paths, data_path: str): def join(my_list): # filter empty strings my_list = tuple( filter( lambda a: bool( a ), # return False if a is an empty string else True, my_list, ) ) if len(my_list) == 0: return "" else: return "/".join(my_list) transformation_values = self.transformation_values if transformation_values is None: _logger.error(f"no values defined for {self.axis_name}") elif numpy.isscalar(transformation_values): pass else: transformation_values = numpy.array(transformation_values) units = self.units if units in ("rad", "radian"): if transformation_values is not None: transformation_values = numpy.rad2deg(transformation_values) units = "degree" elif isinstance(units, MetricSystem): units = str(units) res = { join((data_path, self.axis_name)): transformation_values, join( ( data_path, self.axis_name + transformations_nexus_paths.TRANSFORMATION_TYPE, ) ): self.transformation_type.value, join((data_path, f"{self.axis_name}@units")): units, } # vector is mandatory res[ join((data_path, f"{self.axis_name}{transformations_nexus_paths.VECTOR}")) ] = self.vector if self.offset is not None: res[ join( (data_path, f"{self.axis_name}{transformations_nexus_paths.OFFSET}") ) ] = self.offset if self.depends_on: res[ join( ( data_path, f"{self.axis_name}{transformations_nexus_paths.DEPENDS_ON}", ) ) ] = self.depends_on if self.equipment_component: res[ join( ( data_path, f"{self.axis_name}{transformations_nexus_paths.EQUIPMENT_COMPONENT}", ) ) ] = self.equipment_component return res @staticmethod def from_nx_dict(axis_name: str, dict_: dict, transformations_nexus_paths): if transformations_nexus_paths is None: _logger.warning( "no transformations_nexus_paths (not implemented on this version of nexus - too old)" ) return None value = dict_.get(axis_name, None) # if this is a scalar stored as a 0d array, move it back to a scalar if isinstance(value, numpy.ndarray) and value.ndim == 0: value = value[()] vector = dict_.get(f"{axis_name}{transformations_nexus_paths.VECTOR}", None) transformation_type = dict_.get( f"{axis_name}{transformations_nexus_paths.TRANSFORMATION_TYPE}", None ) if vector is None or transformation_type is None: raise ValueError( "Unable to find mandatory vector and/or transformation_type" ) transformation = Transformation( axis_name=axis_name, value=value, transformation_type=transformation_type, vector=vector, ) units = dict_.get(f"{axis_name}@units", None) or dict_.get( f"{axis_name}@unit", None ) if units is not None: transformation.units = units offset = dict_.get(f"{axis_name}{transformations_nexus_paths.OFFSET}", None) if offset is not None: transformation.offset = offset depends_on = dict_.get( f"{axis_name}{transformations_nexus_paths.DEPENDS_ON}", None ) if depends_on is not None:
transformation.depends_on = depends_on equipment_component = dict_.get( f"{axis_name}{transformations_nexus_paths.EQUIPMENT_COMPONENT}", None ) if equipment_component is not None: transformation.equipment_component = equipment_component return transformation def __setattr__(self, __name, __value): if self.__isfrozen and not hasattr(self, __name): raise AttributeError("can't set attribute", __name) else: super().__setattr__(__name, __value) def __eq__(self, __value: object) -> bool: if not isinstance(__value, Transformation): return False else: same_dependence = self._depends_on == __value.depends_on or ( self._depends_on in (None, GravityTransformation(), "gravity") and __value._depends_on in (None, GravityTransformation(), "gravity") ) if not ( self.vector == __value.vector and self.transformation_type == __value.transformation_type and self.offset == __value.offset and same_dependence and self.equipment_component == __value.equipment_component ): return False else: values_a, units_a = self.get_transformation_values_in_common_unit() values_b, units_b = __value.get_transformation_values_in_common_unit() if values_a is None or values_b is None: return (values_a is values_b) and (units_a == units_b) elif units_a != units_b: return False if isinstance(values_a, numpy.ndarray) and isinstance( values_b, numpy.ndarray ): return numpy.array_equal(values_a, values_b) else: return values_a == values_b def as_matrix(self): # handle the transformation in detector space if self.transformation_values is None: raise ValueError(f"missing transformation values for {self}") elif numpy.isscalar(self.transformation_values): if self.transformation_type is TransformationType.ROTATION: if self.units in ("rad", "rads", "radian", "radians"): theta = self.transformation_values elif self.units in ("deg", "degree", "degs", "degrees"): theta = numpy.deg2rad(self.transformation_values) else: raise ValueError(f"unknown unit: {self.units}") if self.offset != (0, 0, 0): raise ValueError("offset not handled") if self.vector == (1, 0, 0): # rotation around the X axis return numpy.array( [ [ 1, 0, 0, ], [0, numpy.cos(theta), -numpy.sin(theta)], [0, numpy.sin(theta), numpy.cos(theta)], ], dtype=numpy.float32, ) elif self.vector == (0, 1, 0): # rotation around the Y axis return numpy.array( [ [numpy.cos(theta), 0, numpy.sin(theta)], [0, 1, 0], [-numpy.sin(theta), 0, numpy.cos(theta)], ], dtype=numpy.float32, ) elif self.vector == (0, 0, 1): # rotation around the Z axis return numpy.array( [ [numpy.cos(theta), -numpy.sin(theta), 0], [numpy.sin(theta), numpy.cos(theta), 0], [0, 0, 1], ], dtype=numpy.float32, ) else: raise ValueError(f"vector {self.vector} not handled") elif self.transformation_type is TransformationType.TRANSLATION: if self.vector == (1, 0, 0): return numpy.array( [ [ self.transformation_values, 0, 0, ], [0, 1, 0], [0, 0, 1], ], dtype=numpy.float32, ) elif self.vector == (0, 1, 0): return numpy.array( [ [1, 0, 0], [0, self.transformation_values, 0], [0, 0, 1], ], dtype=numpy.float32, ) elif self.vector == (0, 0, 1): return numpy.array( [ [1, 0, 0], [0, 1, 0], [0, 0, self.transformation_values], ], dtype=numpy.float32, ) else: raise RuntimeError( f"unknown transformation type: {self.transformation_type}" ) else: raise ValueError( f"transformations as a list of values is not handled for now ({self})" ) def __str__(self): return f"transformation: {self.axis_name} -" + ", ".join( [ f"type: {self.transformation_type.value}", f"value: {self.transformation_values}", f"vector: {self.vector}", f"offset: {self.offset}", f"depends_on: {self.depends_on}", f"equipment_component: {self.equipment_component}", ]
) class DetYFlipTransformation(Transformation): """ convenient class to define a detector up-down flip if we consider the center of the detector to be at (0, 0) """ def __init__( self, flip: bool, axis_name="ry", depends_on=None, ) -> None: value = 180 if flip else 0 super().__init__( axis_name=axis_name, value=value, transformation_type=TransformationType.ROTATION, vector=TransformationAxis.AXIS_Y, depends_on=depends_on, ) class UDDetTransformation(DetYFlipTransformation): def __init__( self, axis_name="ry", depends_on=None, value=180, ): deprecated_warning( type_="class", name="UDDetTransformation", replacement="DetYFlipTransformation", since_version="1.3", reason="Detector rotation can now be 0 degree.", ) super().__init__(flip=True, axis_name=axis_name, depends_on=depends_on) class DetZFlipTransformation(Transformation): """ convenient class to define a detector left-right flip if we consider the center of the detector to be at (0, 0) """ def __init__( self, flip: bool, axis_name="rz", depends_on=None, ) -> None: value = 180 if flip else 0 super().__init__( axis_name=axis_name, value=value, transformation_type=TransformationType.ROTATION, vector=TransformationAxis.AXIS_Z, depends_on=depends_on, ) class LRDetTransformation(DetZFlipTransformation): def __init__( self, axis_name="rz", depends_on=None, value=180, ): deprecated_warning( type_="class", name="LRDetTransformation", replacement="DetZFlipTransformation", since_version="1.3", reason="Detector rotation can now be 0 degree.", ) super().__init__(flip=True, axis_name=axis_name, depends_on=depends_on) class GravityTransformation(Transformation): """ Gravity is used to solve the transformation chain (as the chain 'endpoint') """ def __init__(self) -> None: super().__init__( axis_name="gravity", value=numpy.nan, transformation_type=TransformationType.TRANSLATION, vector=(0, 0, -1), ) self.units = "m/s2" def get_lr_flip(transformations: tuple) -> tuple: """ check all transformations to find any Transformation matching a left-right detector flip ('DetZFlipTransformation'); return a tuple of all matching transformations """ if not isinstance(transformations, (tuple, list)): raise TypeError( f"transformations is expected to be a tuple. {type(transformations)} provided" ) res = [] for transformation in transformations: if transformation in ( DetZFlipTransformation(flip=True), DetZFlipTransformation(flip=False), ): res.append(transformation) return tuple(res) def get_ud_flip(transformations: tuple) -> tuple: """ check all transformations to find any Transformation matching an up-down detector flip ('DetYFlipTransformation'); return a tuple of all matching transformations """ if not isinstance(transformations, (tuple, list)): raise TypeError( f"transformations is expected to be a tuple. {type(transformations)} provided" ) res = [] for transformation in transformations: if transformation in ( DetYFlipTransformation(flip=True), DetYFlipTransformation(flip=False), ): res.append(transformation) return tuple(res) def build_matrix(transformations: set): """ build a matrix from a set of Transformation """ transformations = { transformation.axis_name: transformation for transformation in transformations } already_applied_transformations = set(["gravity"]) def handle_transformation(transformation: Transformation, matrix): if not isinstance(transformation, Transformation): raise TypeError( f"transformation is expected to be an instance of {Transformation}.
{type(transformation)} provided" ) # handle dependancies if transformation.axis_name in already_applied_transformations: # case already applied return matrix elif transformation.transformation_values is None: # case of the gravity matrix if transformation.axis_name.lower() == "gravity": return numpy.identity(3, dtype=numpy.float32) else: _logger.error( f"transformation value not provided for {transformation.axis_name}. Ignore the transformation" ) return matrix elif ( transformation.depends_on is not None and transformation.depends_on not in already_applied_transformations ): if transformation.depends_on not in transformations: raise ValueError( f"Unable to find transformation {transformation.depends_on}. Unable to build matrix. reason is: broken dependancy chain" ) else: matrix = handle_transformation( transformations[transformation.depends_on], matrix ) matrix = numpy.matmul(matrix, transformation.as_matrix()) already_applied_transformations.add(transformation.axis_name) return matrix matrix = numpy.identity(3, dtype=numpy.float32) for transformation in transformations.values(): matrix = handle_transformation(transformation, matrix) return matrix ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1728994270.0 nxtomo-1.3.0.dev9/nxtomo/utils/utils.py0000644000175000017500000001361614703455736017226 0ustar00paynopayno"""general utils""" from __future__ import annotations from typing import Iterable import h5py import numpy import os from silx.io.utils import h5py_read_dataset from silx.io.utils import open as hdf5_open from nxtomo.io import to_target_rel_path try: import tifffile # noqa F401 except ImportError: has_tiffile = False else: from tifffile import TiffFile has_tiffile = True __all__ = ["cast_and_check_array_1D", "get_data_and_unit", "get_data"] def cast_and_check_array_1D(array, array_name: str): """ cast provided array to 1D :param array: array to be cast to 1D :param array_name: name of the array - used for log only """ if not isinstance(array, (type(None), numpy.ndarray, Iterable)): raise TypeError( f"{array_name} is expected to be None, or an Iterable. Not {type(array)}" ) if array is not None and not isinstance(array, numpy.ndarray): array = numpy.asarray(array) if array is not None and array.ndim > 1: raise ValueError(f"{array_name} is expected to be 0 or 1d not {array.ndim}") return array def get_data_and_unit(file_path: str, data_path: str, default_unit): """ return for an HDF5 dataset his value and his unit. If unit cannot be found then fallback on the 'default_unit' :param file_path: file path location of the HDF5Dataset to read :param data_path: data_path location of the HDF5Dataset to read :param default_unit: default unit to fall back if the dataset has no 'unit' or 'units' attribute """ with hdf5_open(file_path) as h5f: if data_path in h5f and isinstance(h5f[data_path], h5py.Dataset): dataset = h5f[data_path] unit = None if "unit" in dataset.attrs: unit = dataset.attrs["unit"] elif "units" in dataset.attrs: unit = dataset.attrs["units"] else: unit = default_unit if hasattr(unit, "decode"): # handle Diamond dataset unit = unit.decode() return h5py_read_dataset(dataset), unit else: return None, default_unit def get_data(file_path: str, data_path: str): """ proxy to h5py_read_dataset, handling use case 'data_path' not present in the file. 
In this case, None is returned. :param file_path: file path location of the HDF5Dataset to read :param data_path: data_path location of the HDF5Dataset to read """ with hdf5_open(file_path) as h5f: if data_path in h5f: return h5py_read_dataset(h5f[data_path]) else: return None def create_detector_dataset_from_tiff( tiff_files: tuple, external_dataset_group: h5py.Group, external_dataset_prefix="frame_", dtype=None, relative_link: bool = True, ) -> tuple[h5py.VirtualSource]: """ create a series of external datasets pointing to the tiff files (one per file) inside the 'external_dataset_group' :param tiff_files: tuple of tiff files to create external datasets for :param external_dataset_group: output HDF5 group. File must be accessible with write access (mode in 'w', 'a'...) :param dtype: expected dtype of all the tiff data. If not provided it will be deduced from the first page. :param relative_link: if True create the links using relative paths, else use absolute paths. .. warning:: The most robust way to create an NXtomo is to use relative links (so that it can be moved around together with the .tif files). Nevertheless there is currently a limitation on the resolution of relative links to external datasets (resolution is done according to the current working directory instead of the file location...). The tomotools suite will handle it anyway but other software might not (like silx, as this is a workaround and it should be handled at the HDF5 level...). So be aware that those links might 'appear' broken when using relative links. This won't happen when using absolute links... """ if not has_tiffile: raise RuntimeError("tifffile is not installed") external_datasets = [] # convert from local to ... for i_file, tiff_file in enumerate(tiff_files): with TiffFile(tiff_file, mode="r") as tif: fh = tif.filehandle for page in tif.pages: if dtype is not None: assert dtype == page.dtype, "incoherent data type" dtype = page.dtype for index, (offset, bytecount) in enumerate( zip(page.dataoffsets, page.databytecounts) ): _ = fh.seek(offset) data = fh.read(bytecount) _, _, shape = page.decode(data, index, jpegtables=page.jpegtables) if len(shape) == 4: # don't know why but return it as 4D when 2D expected...
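# note (assumption, not from the original sources): tifffile appears to decode segments with a (depth, height, width, samples) shape; for single-sample grayscale pages the trailing axis is 1, so dropping the last axis recovers the expected (depth, height, width) layout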
shape = shape[0:-1] elif len(shape) == 2: shape = 1, *shape # move tiff file path to relative path if relative_link: external_file_path = to_target_rel_path( file_path=tiff_file, target_path=external_dataset_group.file.filename, ) else: external_file_path = os.path.abspath(tiff_file) external_dataset = external_dataset_group.create_dataset( name=f"{external_dataset_prefix}{str(i_file).zfill(6)}", shape=shape, dtype=dtype, external=[(external_file_path, offset, bytecount)], ) external_datasets.append(external_dataset) virtual_sources = [] for i, ed in enumerate(external_datasets): vsource = h5py.VirtualSource(ed) virtual_sources.append(vsource) return tuple(virtual_sources) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1729922977.0 nxtomo-1.3.0.dev9/nxtomo/version.py0000644000175000017500000000005514707103641016371 0ustar00paynopaynoversion = "1.3.0dev9" """software version""" ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1729922990.4957538 nxtomo-1.3.0.dev9/nxtomo.egg-info/0000755000175000017500000000000014707103656016032 5ustar00paynopayno././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1729922990.0 nxtomo-1.3.0.dev9/nxtomo.egg-info/PKG-INFO0000644000175000017500000001003314707103656017124 0ustar00paynopaynoMetadata-Version: 2.1 Name: nxtomo Version: 1.3.0.dev9 Summary: module to create / edit NXtomo application Author-email: Henri Payno , Pierre Paleo , Alessandro Mirone , Jérôme Lesaint , Pierre-Olivier Autran License: The nxtomo library goal is to provide a powerful python interface to read / write nexus NXtomo application nxtomo is distributed under the MIT license. The MIT license follows: Copyright (c) European Synchrotron Radiation Facility (ESRF) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
Project-URL: Homepage, https://gitlab.esrf.fr/tomotools/nxtomo Project-URL: Documentation, https://gitlab.esrf.fr/tomotools/nxtomo/pages Project-URL: Repository, https://gitlab.esrf.fr/tomotools/nxtomo Project-URL: Changelog, https://gitlab.esrf.fr/tomotools/nxtomo/-/blob/master/CHANGELOG.md Keywords: NXtomo,nexus,tomography,tomotools,esrf Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers Classifier: Intended Audience :: Science/Research Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Classifier: Environment :: Console Classifier: License :: OSI Approved :: MIT License Classifier: Operating System :: Unix Classifier: Operating System :: MacOS :: MacOS X Classifier: Operating System :: POSIX Classifier: Topic :: Scientific/Engineering :: Physics Classifier: Topic :: Scientific/Engineering :: Medical Science Apps. Requires-Python: >=3.7 Description-Content-Type: text/markdown License-File: LICENSE Requires-Dist: numpy<2.0 Requires-Dist: h5py>=3.0 Requires-Dist: silx>=2.0 Requires-Dist: pyunitsystem>=2.0.0a Requires-Dist: packaging Provides-Extra: test Requires-Dist: pytest; extra == "test" Provides-Extra: doc Requires-Dist: Sphinx<5.2.0,>=4.0.0; extra == "doc" Requires-Dist: nbsphinx; extra == "doc" Requires-Dist: jupyterlab; extra == "doc" Requires-Dist: ipykernel; extra == "doc" Requires-Dist: nbconvert; extra == "doc" Requires-Dist: pandoc; extra == "doc" Requires-Dist: scikit-image; extra == "doc" Requires-Dist: h5glance; extra == "doc" Requires-Dist: jupyter_client; extra == "doc" Requires-Dist: pydata_sphinx_theme; extra == "doc" Requires-Dist: sphinx_autodoc_typehints; extra == "doc" # nxtomo the goal of this project is to provide a powerful and user friendly API to create and edit [NXtomo](https://manual.nexusformat.org/classes/applications/NXtomo.html) application Please find at https://tomotools.gitlab-pages.esrf.fr/nxtomo the latest documentation Tutorials are avaible here: https://tomotools.gitlab-pages.esrf.fr/nxtomo/ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1729922990.0 nxtomo-1.3.0.dev9/nxtomo.egg-info/SOURCES.txt0000644000175000017500000000255114707103656017721 0ustar00paynopaynoLICENSE README.md pyproject.toml setup.py doc/conf.py nxtomo/__init__.py nxtomo/io.py nxtomo/version.py nxtomo.egg-info/PKG-INFO nxtomo.egg-info/SOURCES.txt nxtomo.egg-info/dependency_links.txt nxtomo.egg-info/requires.txt nxtomo.egg-info/top_level.txt nxtomo/application/nxtomo.py nxtomo/application/tests/test_nxtomo.py nxtomo/nxobject/__init__.py nxtomo/nxobject/nxdetector.py nxtomo/nxobject/nxinstrument.py nxtomo/nxobject/nxmonitor.py nxtomo/nxobject/nxobject.py nxtomo/nxobject/nxsample.py nxtomo/nxobject/nxsource.py nxtomo/nxobject/nxtransformations.py nxtomo/nxobject/utils.py nxtomo/nxobject/tests/test_nxdetector.py nxtomo/nxobject/tests/test_nxinstrument.py nxtomo/nxobject/tests/test_nxmonitor.py nxtomo/nxobject/tests/test_nxobject.py nxtomo/nxobject/tests/test_nxsample.py nxtomo/nxobject/tests/test_nxsource.py nxtomo/nxobject/tests/test_nxtransformations.py nxtomo/paths/__init__.py nxtomo/paths/nxdetector.py nxtomo/paths/nxinstrument.py nxtomo/paths/nxmonitor.py nxtomo/paths/nxsample.py 
nxtomo/paths/nxsource.py nxtomo/paths/nxtomo.py nxtomo/paths/nxtransformations.py nxtomo/paths/tests/test_backward_compatibility.py nxtomo/utils/__init__.py nxtomo/utils/detectorsplitter.py nxtomo/utils/frameappender.py nxtomo/utils/io.py nxtomo/utils/transformation.py nxtomo/utils/utils.py nxtomo/utils/tests/test_detectorsplitter.py nxtomo/utils/tests/test_transformation.py././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1729922990.0 nxtomo-1.3.0.dev9/nxtomo.egg-info/dependency_links.txt0000644000175000017500000000000114707103656022100 0ustar00paynopayno ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1729922990.0 nxtomo-1.3.0.dev9/nxtomo.egg-info/requires.txt0000644000175000017500000000035114707103656020431 0ustar00paynopaynonumpy<2.0 h5py>=3.0 silx>=2.0 pyunitsystem>=2.0.0a packaging [doc] Sphinx<5.2.0,>=4.0.0 nbsphinx jupyterlab ipykernel nbconvert pandoc scikit-image h5glance jupyter_client pydata_sphinx_theme sphinx_autodoc_typehints [test] pytest ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1729922990.0 nxtomo-1.3.0.dev9/nxtomo.egg-info/top_level.txt0000644000175000017500000000003314707103656020560 0ustar00paynopaynobuild dist doc html nxtomo ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1727757723.0 nxtomo-1.3.0.dev9/pyproject.toml0000644000175000017500000000451414676676633015753 0ustar00paynopayno[build-system] requires = ["setuptools>=61.0", "wheel"] build-backend = "setuptools.build_meta" [project] name = "nxtomo" authors = [ {name = "Henri Payno", email = "henri.payno@esrf.fr"}, {name = "Pierre Paleo", email = "pierre.paleo@esrf.fr"}, {name = "Alessandro Mirone", email = "mirone@esrf.fr"}, {name = "Jérôme Lesaint", email = "jerome.lesaint@esrf.fr"}, {name = "Pierre-Olivier Autran", email = "pierre-olivier.autran@esrf.fr"}, ] dynamic = ["version"] description = "module to create / edit NXtomo application" readme = "README.md" requires-python = ">=3.7" keywords = ["NXtomo", "nexus", "tomography", "tomotools", "esrf"] license = {file = "LICENSE"} classifiers = [ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Intended Audience :: Science/Research", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Environment :: Console", "License :: OSI Approved :: MIT License", "Operating System :: Unix", "Operating System :: MacOS :: MacOS X", "Operating System :: POSIX", "Topic :: Scientific/Engineering :: Physics", "Topic :: Scientific/Engineering :: Medical Science Apps.", ] dependencies = [ "numpy<2.0", "h5py>=3.0", "silx>=2.0", "pyunitsystem>=2.0.0a", "packaging", ] [project.urls] Homepage = "https://gitlab.esrf.fr/tomotools/nxtomo" Documentation = "https://gitlab.esrf.fr/tomotools/nxtomo/pages" Repository = "https://gitlab.esrf.fr/tomotools/nxtomo" Changelog = "https://gitlab.esrf.fr/tomotools/nxtomo/-/blob/master/CHANGELOG.md" [project.optional-dependencies] test = [ "pytest", ] doc = [ "Sphinx>=4.0.0,<5.2.0", "nbsphinx", "jupyterlab", "ipykernel", "nbconvert", "pandoc", "scikit-image", "h5glance", "jupyter_client", "pydata_sphinx_theme", "sphinx_autodoc_typehints", ] [build_sphinx] source_dir = "doc" build_dir = "build/sphinx" [tool.setuptools.dynamic] version = {attr = 
"nxtomo.version.version"} [tool.setuptools.packages.find] where = ["."] # list of folders that contain the packages (["."] by default) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1729922990.4957538 nxtomo-1.3.0.dev9/setup.cfg0000644000175000017500000000004614707103656014635 0ustar00paynopayno[egg_info] tag_build = tag_date = 0 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1710441677.0 nxtomo-1.3.0.dev9/setup.py0000644000175000017500000000004614574642315014530 0ustar00paynopaynofrom setuptools import setup setup()